// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;

#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
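
// Illustrative sketch (not part of upstream LDK; `clamp_send_amount` and its caller are
// assumptions): how a consumer of `AvailableBalances` might size an outbound payment. The
// point is that the per-HTLC limit, not the raw outbound capacity, bounds the next HTLC.
//
//     fn clamp_send_amount(available: &AvailableBalances, desired_msat: u64) -> Option<u64> {
//         if desired_msat < available.next_outbound_htlc_minimum_msat { return None; }
//         // next_outbound_htlc_limit_msat already reflects reserve and fee constraints,
//         // so it can be well below outbound_capacity_msat.
//         Some(core::cmp::min(desired_msat, available.next_outbound_htlc_limit_msat))
//     }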
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
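
// A rough sketch (not upstream code; payloads and error handling omitted) of the inbound
// life-cycle the variants above describe, written as a hypothetical transition helper:
//
//     fn on_their_commitment_signed(st: InboundHTLCState) -> InboundHTLCState {
//         match st {
//             // Their commitment_signed now commits to the HTLC on our side; we send our own
//             // revoke_and_ack in response, but cannot include the HTLC in a commitment_signed
//             // of ours until their revoke_and_ack delivers the next per_commitment_point.
//             InboundHTLCState::RemoteAnnounced(res) =>
//                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res),
//             other => other,
//         }
//     }
//
// The remaining hops are driven by revoke_and_ack: AwaitingRemoteRevokeToAnnounce ->
// AwaitingAnnouncedRemoteRevoke once we send our own commitment_signed containing the HTLC,
// and finally Committed once their revoke_and_ack for that commitment arrives.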
struct InboundHTLCOutput {
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
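
// Hedged sketch (not upstream code) of the outbound removal handshake the last three
// variants describe, from our point of view as the HTLC sender:
//
//     update_fulfill_htlc / update_fail_htlc received      -> RemoteRemoved(outcome)
//     their commitment_signed received (we send an RAA)    -> AwaitingRemoteRevokeToRemove(outcome)
//     we send a commitment_signed dropping the HTLC        -> AwaitingRemovedRemoteRevoke(outcome)
//     their revoke_and_ack received                        -> HTLC gone; claim or fail backwards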
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
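
// Small usage sketch (hypothetical call sites, not upstream code) for the conversions above:
// a counterparty's removal either carries a failure reason or implies success.
//
//     let failed: OutboundHTLCOutcome = Some(reason).into();               // -> Failure(reason)
//     let fulfilled: OutboundHTLCOutcome = None::<HTLCFailReason>.into();  // -> Success(None)
//     let maybe_reason: Option<&HTLCFailReason> = (&failed).into();        // -> Some(&reason)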
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		payment_hash: PaymentHash,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
		payment_preimage: PaymentPreimage,
		err_packet: msgs::OnionErrorPacket,
		sha256_of_onion: [u8; 32],
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

			const $flag: $flag_type = $flag_type($value);

			/// All flags that apply to the specified [`ChannelState`] variant.
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			fn new() -> Self { Self(0) }

			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Ok($flag_type(flags))

			fn is_empty(&self) -> bool { self.0 == 0 }

			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }

		impl core::ops::Not for $flag_type {
			fn not(self) -> Self::Output { Self(!self.0) }
		impl core::ops::BitOr for $flag_type {
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		impl core::ops::BitAnd for $flag_type {
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
/// We declare all the states/flags here together to help determine which bits are still available.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		fn $get(&self) -> bool {
				ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				ChannelState::$state(flags) => *flags |= $state_flag,
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
		fn $clear(&mut self) {
				ChannelState::$state(flags) => *flags &= !($state_flag),
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	fn from_u32(state: u32) -> Result<Self, ()> {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))

	fn to_u32(&self) -> u32 {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),

	fn should_force_holding_cell(&self) -> bool {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
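
	// Hedged sketch (not upstream code) tying the pieces above together: each
	// `define_state_flags!`-generated type is a u32 newtype composable with `|`, and
	// `to_u32`/`from_u32` pack the variant's marker bit together with its flags.
	//
	//     let mut flags = ChannelReadyFlags::new();
	//     flags |= ChannelReadyFlags::AWAITING_REMOTE_REVOKE;   // bit 9 = 512
	//     flags |= FundedStateFlags::PEER_DISCONNECTED;         // bit 7 = 128
	//     assert!(flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()));
	//
	//     let state = ChannelState::ChannelReady(flags);
	//     // The CHANNEL_READY marker is bit 6 = 64, so the packed value is 64 | 512 | 128 = 704.
	//     assert_eq!(state.to_u32(), 704);
	//     assert_eq!(ChannelState::from_u32(704), Ok(state));
	//     assert!(state.is_awaiting_remote_revoke() && state.is_peer_disconnected());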
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
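
// Worked example (a sketch, not normative): for a non-anchor channel with 3 non-dust HTLCs at
// a feerate of 2_500 sat/kW, the commitment transaction fee the funder pays is
//
//     weight = commitment_tx_base_weight(&features) + 3 * COMMITMENT_TX_WEIGHT_PER_HTLC
//            = 724 + 3 * 172 = 1_240 weight units
//     fee    = 1_240 * 2_500 / 1_000 = 3_100 sats
//
// With anchors the base weight is 1_124 instead, and the two 330-sat anchor outputs
// (ANCHOR_OUTPUT_VALUE_SATOSHI each) also come out of the funder's balance.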
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
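
// Worked example (illustrative only): on a 1_000_000 sat channel, the legacy default caps
// `holder_max_htlc_value_in_flight_msat` at 10% of the channel value, i.e.
//     1_000_000 sat * 10 / 100 = 100_000 sat = 100_000_000 msat.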
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
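
// Informal worked derivation (following the linked BOLT discussion, stated as an assumption
// rather than spec text): Bitcoin Core's dust threshold for a segwit output at the default
// 3 sat/vbyte dust relay rate is roughly
//
//     dust = 3 * (serialized_output_size + 67)    // 67 vbytes ~= discounted input spend cost
//
// The largest future-segwit output is 8 (value) + 1 (script len) + 42 (script) = 51 bytes,
// giving 3 * (51 + 67) = 354 sats, hence the constant above: any segwit output above this
// value remains relayable.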
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}

/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	/// We've announced the channel as disabled.

/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
/// An enum indicating whether the local or remote side offered a given HTLC.

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	origin: HTLCInitiator,

	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {

/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance, but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Therefore, before sending an HTLC when we are the initiator, we check that the feerate can
/// increase by this multiple without hitting this case.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
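
// Worked example (illustrative numbers, a sketch of the idea rather than the exact check):
// suppose we are the initiator on a non-anchor channel at 1_000 sat/kW with 2 non-dust HTLCs
// already pending. Before adding a third HTLC we want our remaining balance to cover the
// commitment fee at twice the current feerate (FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 2)
// with the new HTLC included:
//
//     buffered_fee = (724 + 3 * 172) * (1_000 * 2) / 1_000 = 2_480 sats
//
// versus 1_240 sats at the current feerate, leaving head-room for a moderate feerate bump
// without the channel getting "stuck" against our reserve.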
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
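
// Worked through with the numbers listed above: ~300 s average convergence delay / 60 s per
// tick = 5 ticks, which is the value chosen for EXPIRE_PREV_CONFIG_TICKS.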
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel's peer has yet to respond after we reach
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
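
// A minimal usage sketch (assumed caller, not upstream code): a ChannelManager-style timer
// would bump and test each unfunded channel once per tick, dropping the stale ones.
//
//     fn prune_stale_unfunded(unfunded: &mut Vec<UnfundedChannelContext>) {
//         // `should_expire_unfunded_channel` both increments the tick counter and reports
//         // whether the 60-tick (~1 hour at one tick per minute) limit was hit.
//         unfunded.retain_mut(|ctx| !ctx.should_expire_unfunded_channel());
//     }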
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. I.e. when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	pub(super) holder_dust_limit_satoshis: u64,
	holder_dust_limit_satoshis: u64,

	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	counterparty_max_htlc_value_in_flight_msat: u64,

	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	pub(super) holder_selected_channel_reserve_satoshis: u64,
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,

	pub counterparty_max_accepted_htlcs: u16,
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,
	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to stall the negotiation indefinitely.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,
	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
1238 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1239 /// Allowed in any state (including after shutdown)
1240 pub fn get_update_time_counter(&self) -> u32 {
1241 self.update_time_counter
1244 pub fn get_latest_monitor_update_id(&self) -> u64 {
1245 self.latest_monitor_update_id
1248 pub fn should_announce(&self) -> bool {
1249 self.config.announced_channel
1252 pub fn is_outbound(&self) -> bool {
1253 self.channel_transaction_parameters.is_outbound_from_holder
1256 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1257 /// Allowed in any state (including after shutdown)
1258 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1259 self.config.options.forwarding_fee_base_msat
1262 /// Returns true if we've ever received a message from the remote end for this Channel
1263 pub fn have_received_message(&self) -> bool {
1264 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1267 /// Returns true if this channel is fully established and not known to be closing.
1268 /// Allowed in any state (including after shutdown)
1269 pub fn is_usable(&self) -> bool {
1270 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1271 !self.channel_state.is_local_shutdown_sent() &&
1272 !self.channel_state.is_remote_shutdown_sent() &&
1273 !self.monitor_pending_channel_ready
	/// shutdown state returns the state of the channel in its various stages of shutdown
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		match self.channel_state {
			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
					ChannelShutdownState::ShutdownInitiated
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
					ChannelShutdownState::ResolvingHTLCs
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
					ChannelShutdownState::NegotiatingClosingFee
				} else {
					ChannelShutdownState::NotShuttingDown
				},
			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
			_ => ChannelShutdownState::NotShuttingDown,
		}
	}
1294 fn closing_negotiation_ready(&self) -> bool {
1295 let is_ready_to_close = match self.channel_state {
1296 ChannelState::AwaitingChannelReady(flags) =>
1297 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298 ChannelState::ChannelReady(flags) =>
1299 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1302 self.pending_inbound_htlcs.is_empty() &&
1303 self.pending_outbound_htlcs.is_empty() &&
1304 self.pending_update_fee.is_none() &&
1308 /// Returns true if this channel is currently available for use. This is a superset of
1309 /// is_usable() and considers things like the channel being temporarily disabled.
1310 /// Allowed in any state (including after shutdown)
1311 pub fn is_live(&self) -> bool {
1312 self.is_usable() && !self.channel_state.is_peer_disconnected()
1315 // Public utilities:
1317 pub fn channel_id(&self) -> ChannelId {
1321 /// Returns the `temporary_channel_id` used during channel establishment.
1323 /// Will return `None` for channels created prior to LDK version 0.0.115.
1324 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1325 self.temporary_channel_id
1328 pub fn minimum_depth(&self) -> Option<u32> {
1332 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1333 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1334 pub fn get_user_id(&self) -> u128 {
1338 /// Gets the channel's type
1339 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1343 /// Gets the channel's `short_channel_id`.
1345 /// Will return `None` if the channel hasn't been confirmed yet.
1346 pub fn get_short_channel_id(&self) -> Option<u64> {
1347 self.short_channel_id
1350 /// Allowed in any state (including after shutdown)
1351 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1352 self.latest_inbound_scid_alias
1355 /// Allowed in any state (including after shutdown)
1356 pub fn outbound_scid_alias(&self) -> u64 {
1357 self.outbound_scid_alias
1360 /// Returns the holder signer for this channel.
1362 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1363 return &self.holder_signer
1366 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1367 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1368 /// or prior to any channel actions during `Channel` initialization.
1369 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1370 debug_assert_eq!(self.outbound_scid_alias, 0);
1371 self.outbound_scid_alias = outbound_scid_alias;
1374 /// Returns the funding_txo we either got from our peer, or were given by
1375 /// get_funding_created.
1376 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1377 self.channel_transaction_parameters.funding_outpoint
1380 /// Returns the height at which our funding transaction was confirmed.
1381 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1382 let conf_height = self.funding_tx_confirmation_height;
1383 if conf_height > 0 {
1390 /// Returns the block hash in which our funding transaction was confirmed.
1391 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1392 self.funding_tx_confirmed_in
1395 /// Returns the current number of confirmations on the funding transaction.
1396 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1397 if self.funding_tx_confirmation_height == 0 {
1398 // We either haven't seen any confirmation yet, or observed a reorg.
1402 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
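// Illustrative sketch (hypothetical numbers, not taken from any particular channel): the count
// is inclusive of the block the funding transaction confirmed in, and saturates to 0 if the
// given height is below the confirmation height (e.g. while handling a reorg).
//
//     let funding_tx_confirmation_height: u32 = 100; // hypothetical
//     let height: u32 = 102;                         // hypothetical current tip
//     let confs = height.checked_sub(funding_tx_confirmation_height).map_or(0, |c| c + 1);
//     assert_eq!(confs, 3); // blocks 100, 101 and 102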
1405 fn get_holder_selected_contest_delay(&self) -> u16 {
1406 self.channel_transaction_parameters.holder_selected_contest_delay
1409 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1410 &self.channel_transaction_parameters.holder_pubkeys
1413 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1414 self.channel_transaction_parameters.counterparty_parameters
1415 .as_ref().map(|params| params.selected_contest_delay)
1418 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1419 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_counterparty_node_id(&self) -> PublicKey {
1424 self.counterparty_node_id
1427 /// Allowed in any state (including after shutdown)
1428 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1429 self.holder_htlc_minimum_msat
1432 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1433 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1434 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1437 /// Allowed in any state (including after shutdown)
1438 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1440 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1441 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1442 // the channel might have been used to route very small values (either by honest users or as a DoS).
1443 self.channel_value_satoshis * 1000 * 9 / 10,
1445 self.counterparty_max_htlc_value_in_flight_msat
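// Illustrative sketch (hypothetical values): the announced maximum is the smaller of 90% of the
// channel capacity (in msat) and the counterparty's max-in-flight limit.
//
//     let channel_value_satoshis: u64 = 1_000_000;                       // hypothetical
//     let counterparty_max_htlc_value_in_flight_msat: u64 = 500_000_000; // hypothetical
//     let announced_max = core::cmp::min(
//         channel_value_satoshis * 1000 * 9 / 10, // 900_000_000 msat
//         counterparty_max_htlc_value_in_flight_msat,
//     );
//     assert_eq!(announced_max, 500_000_000);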
1449 /// Allowed in any state (including after shutdown)
1450 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1451 self.counterparty_htlc_minimum_msat
1454 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1455 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1456 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1459 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1460 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1461 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1463 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1464 party_max_htlc_value_in_flight_msat
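// Illustrative sketch (hypothetical values; the surrounding `cmp::min` over these two expressions
// is elided above): both parties' reserves are subtracted from the capacity before taking the
// minimum with the given in-flight limit.
//
//     let channel_value_satoshis: u64 = 1_000_000;                 // hypothetical
//     let counterparty_reserve: u64 = 10_000;                      // hypothetical, sats
//     let holder_reserve: u64 = 10_000;                            // hypothetical, sats
//     let party_max_htlc_value_in_flight_msat: u64 = 990_000_000;  // hypothetical
//     let htlc_max = core::cmp::min(
//         (channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000, // 980_000_000 msat
//         party_max_htlc_value_in_flight_msat,
//     );
//     assert_eq!(htlc_max, 980_000_000);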
1469 pub fn get_value_satoshis(&self) -> u64 {
1470 self.channel_value_satoshis
1473 pub fn get_fee_proportional_millionths(&self) -> u32 {
1474 self.config.options.forwarding_fee_proportional_millionths
1477 pub fn get_cltv_expiry_delta(&self) -> u16 {
1478 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1481 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1482 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1483 where F::Target: FeeEstimator
1485 match self.config.options.max_dust_htlc_exposure {
1486 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1487 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1488 ConfirmationTarget::OnChainSweep) as u64;
1489 feerate_per_kw.saturating_mul(multiplier)
1491 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
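// Illustrative sketch (hypothetical numbers): with the fee-rate-multiplier variant, the exposure
// cap in msat scales with the current estimator feerate.
//
//     let feerate_per_kw: u64 = 2_500; // hypothetical estimator output, sat per 1000 weight
//     let multiplier: u64 = 10_000;    // hypothetical FeeRateMultiplier value
//     assert_eq!(feerate_per_kw.saturating_mul(multiplier), 25_000_000); // i.e. 25_000 sat of allowed dust exposure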
1495 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1496 pub fn prev_config(&self) -> Option<ChannelConfig> {
1497 self.prev_config.map(|prev_config| prev_config.0)
1500 // Checks whether we should emit a `ChannelPending` event.
1501 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1502 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1505 // Returns whether we already emitted a `ChannelPending` event.
1506 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1507 self.channel_pending_event_emitted
1510 // Remembers that we already emitted a `ChannelPending` event.
1511 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1512 self.channel_pending_event_emitted = true;
1515 // Checks whether we should emit a `ChannelReady` event.
1516 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1517 self.is_usable() && !self.channel_ready_event_emitted
1520 // Remembers that we already emitted a `ChannelReady` event.
1521 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1522 self.channel_ready_event_emitted = true;
1525 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1526 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1527 /// no longer be considered when forwarding HTLCs.
1528 pub fn maybe_expire_prev_config(&mut self) {
1529 if self.prev_config.is_none() {
1532 let prev_config = self.prev_config.as_mut().unwrap();
1534 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1535 self.prev_config = None;
1539 /// Returns the current [`ChannelConfig`] applied to the channel.
1540 pub fn config(&self) -> ChannelConfig {
1544 /// Updates the channel's config. A bool is returned indicating whether the config update
1545 /// applied resulted in a new ChannelUpdate message.
1546 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1547 let did_channel_update =
1548 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1549 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1550 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1551 if did_channel_update {
1552 self.prev_config = Some((self.config.options, 0));
1553 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1554 // policy change to propagate throughout the network.
1555 self.update_time_counter += 1;
1557 self.config.options = *config;
1561 /// Returns true if funding_signed was sent/received and the
1562 /// funding transaction has been broadcast if necessary.
1563 pub fn is_funding_broadcast(&self) -> bool {
1564 !self.channel_state.is_pre_funded_state() &&
1565 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1568 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1569 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1570 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1571 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1572 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1573 /// an HTLC to it).
1574 /// @local is used only to convert relevant internal structures which refer to remote vs local
1575 /// to decide value of outputs and direction of HTLCs.
1576 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1577 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1578 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1579 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1580 /// which peer generated this transaction and "to whom" this transaction flows.
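// Illustrative sketch of the two typical pairings (hypothetical call shapes; see e.g.
// `get_funding_signed_msg` below for a real counterparty-transaction call):
//
//     // The counterparty's ("remote") commitment, which they may broadcast and we sign:
//     //     let keys = self.build_remote_transaction_keys();
//     //     self.build_commitment_transaction(number, &keys, false, generated_by_local, logger);
//     // Our own ("local") commitment, which we may broadcast and they sign:
//     //     let keys = self.build_holder_transaction_keys(number);
//     //     self.build_commitment_transaction(number, &keys, true, generated_by_local, logger);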
1582 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1583 where L::Target: Logger
1585 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1586 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1587 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1589 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1590 let mut remote_htlc_total_msat = 0;
1591 let mut local_htlc_total_msat = 0;
1592 let mut value_to_self_msat_offset = 0;
1594 let mut feerate_per_kw = self.feerate_per_kw;
1595 if let Some((feerate, update_state)) = self.pending_update_fee {
1596 if match update_state {
1597 // Note that these match the inclusion criteria when scanning
1598 // pending_inbound_htlcs below.
1599 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1600 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1603 feerate_per_kw = feerate;
1607 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1608 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1609 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1611 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
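// Illustrative sketch (hypothetical obscure factor): LDK counts commitment numbers down from
// INITIAL_COMMITMENT_NUMBER, so the "really" value logged above is the BOLT 3 commitment number
// counting up from zero; per BOLT 3 that value, XORed with the 48-bit obscure factor, is what is
// split across the commitment transaction's locktime and input sequence fields.
//
//     let initial: u64 = (1 << 48) - 1;            // INITIAL_COMMITMENT_NUMBER
//     let commitment_number: u64 = initial - 5;    // hypothetical: our sixth commitment
//     let obscure_factor: u64 = 0x2bb0_3852_1914;  // hypothetical 48-bit obscure factor
//     assert_eq!(initial - commitment_number, 5);  // the "really" value
//     let _obscured = (initial - commitment_number) ^ obscure_factor;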
1613 macro_rules! get_htlc_in_commitment {
1614 ($htlc: expr, $offered: expr) => {
1615 HTLCOutputInCommitment {
1617 amount_msat: $htlc.amount_msat,
1618 cltv_expiry: $htlc.cltv_expiry,
1619 payment_hash: $htlc.payment_hash,
1620 transaction_output_index: None
1625 macro_rules! add_htlc_output {
1626 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1627 if $outbound == local { // "offered HTLC output"
1628 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1629 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1632 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1634 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1635 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1636 included_non_dust_htlcs.push((htlc_in_tx, $source));
1638 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1639 included_dust_htlcs.push((htlc_in_tx, $source));
1642 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1643 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1646 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1648 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1649 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1650 included_non_dust_htlcs.push((htlc_in_tx, $source));
1652 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1653 included_dust_htlcs.push((htlc_in_tx, $source));
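// Illustrative sketch (hypothetical numbers, non-anchor channel): an offered HTLC is compared
// against the broadcaster's dust limit plus the fee of a weight-663 HTLC-timeout transaction
// (the BOLT 3 weight for non-anchor channels); anything below that threshold is trimmed to dust.
//
//     let broadcaster_dust_limit_satoshis: u64 = 546;  // hypothetical
//     let feerate_per_kw: u64 = 2_500;                 // hypothetical
//     let htlc_tx_fee = feerate_per_kw * 663 / 1000;   // 1_657 sat
//     let threshold_sat = broadcaster_dust_limit_satoshis + htlc_tx_fee; // 2_203 sat
//     assert!(2_500_000u64 / 1000 >= threshold_sat);   // a 2_500_000 msat HTLC is non-dust
//     assert!(2_000_000u64 / 1000 < threshold_sat);    // a 2_000_000 msat HTLC is trimmed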
1659 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1661 for ref htlc in self.pending_inbound_htlcs.iter() {
1662 let (include, state_name) = match htlc.state {
1663 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1664 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1665 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1666 InboundHTLCState::Committed => (true, "Committed"),
1667 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1671 add_htlc_output!(htlc, false, None, state_name);
1672 remote_htlc_total_msat += htlc.amount_msat;
1674 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1676 &InboundHTLCState::LocalRemoved(ref reason) => {
1677 if generated_by_local {
1678 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1679 inbound_htlc_preimages.push(preimage);
1680 value_to_self_msat_offset += htlc.amount_msat as i64;
1690 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1692 for ref htlc in self.pending_outbound_htlcs.iter() {
1693 let (include, state_name) = match htlc.state {
1694 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1695 OutboundHTLCState::Committed => (true, "Committed"),
1696 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1697 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1698 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1701 let preimage_opt = match htlc.state {
1702 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1708 if let Some(preimage) = preimage_opt {
1709 outbound_htlc_preimages.push(preimage);
1713 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1714 local_htlc_total_msat += htlc.amount_msat;
1716 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1718 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1719 value_to_self_msat_offset -= htlc.amount_msat as i64;
1721 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1722 if !generated_by_local {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1731 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1732 assert!(value_to_self_msat >= 0);
1733 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1734 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1735 // "violate" their reserve value by couting those against it. Thus, we have to convert
1736 // everything to i64 before subtracting as otherwise we can overflow.
1737 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1738 assert!(value_to_remote_msat >= 0);
1740 #[cfg(debug_assertions)]
1742 // Make sure that the to_self/to_remote is always either past the appropriate
1743 // channel_reserve *or* it is making progress towards it.
1744 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1745 self.holder_max_commitment_tx_output.lock().unwrap()
1747 self.counterparty_max_commitment_tx_output.lock().unwrap()
1749 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1750 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1751 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1752 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1755 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1756 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1757 let (value_to_self, value_to_remote) = if self.is_outbound() {
1758 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1760 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
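// Illustrative sketch (hypothetical values, non-anchor channel): only the funder's balance pays
// the commitment fee (and any anchor outputs). With a 1_000_000 sat channel, 600_000_000 msat to
// ourselves, a 2_500 sat/kW feerate and two non-dust HTLCs, and assuming we are the funder:
//
//     let total_fee_sat = 2_500u64 * (724 + 2 * 172) / 1000; // base weight 724 + 172 per HTLC
//     assert_eq!(total_fee_sat, 2_670);
//     let value_to_self = 600_000_000i64 / 1000 - 0 /* no anchors */ - total_fee_sat as i64;
//     let value_to_remote = (1_000_000_000i64 - 600_000_000) / 1000;
//     assert_eq!((value_to_self, value_to_remote), (597_330, 400_000));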
1763 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1764 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1765 let (funding_pubkey_a, funding_pubkey_b) = if local {
1766 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1768 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1771 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1772 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1777 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1783 let num_nondust_htlcs = included_non_dust_htlcs.len();
1785 let channel_parameters =
1786 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1787 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1788 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1795 &mut included_non_dust_htlcs,
1798 let mut htlcs_included = included_non_dust_htlcs;
1799 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1800 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1801 htlcs_included.append(&mut included_dust_htlcs);
1803 // For the stats, trim the value in msat to 0 accordingly
1804 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1805 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1813 local_balance_msat: value_to_self_msat as u64,
1814 remote_balance_msat: value_to_remote_msat as u64,
1815 inbound_htlc_preimages,
1816 outbound_htlc_preimages,
1821 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1822 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1823 /// our counterparty!)
1824 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1825 /// TODO Some magic rust shit to compile-time check this?
1826 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1827 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1828 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1829 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1830 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1832 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1836 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1837 /// will sign and send to our counterparty.
1838 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1839 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1840 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1841 //may see payments to it!
1842 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1843 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1844 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1846 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1849 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1850 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1851 /// Panics if called before accept_channel/InboundV1Channel::new
1852 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1853 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1856 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1857 &self.get_counterparty_pubkeys().funding_pubkey
1860 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1864 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1865 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1866 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1867 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1868 // more dust balance if the feerate increases when we have several HTLCs pending
1869 // which are near the dust limit.
1870 let mut feerate_per_kw = self.feerate_per_kw;
1871 // If there's a pending update fee, use it to ensure we aren't under-estimating
1872 // potential feerate updates coming soon.
1873 if let Some((feerate, _)) = self.pending_update_fee {
1874 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1876 if let Some(feerate) = outbound_feerate_update {
1877 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1879 cmp::max(2530, feerate_per_kw * 1250 / 1000)
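// Illustrative sketch (hypothetical current feerates): the buffered feerate floors at 2530
// sat/kW, and otherwise adds 25% on top of the current (or pending) feerate.
//
//     assert_eq!(core::cmp::max(2530, 2_000u32 * 1250 / 1000), 2_530); // low feerates floor at 2530
//     assert_eq!(core::cmp::max(2530, 5_000u32 * 1250 / 1000), 6_250); // otherwise +25%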
1882 /// Get forwarding information for the counterparty.
1883 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1884 self.counterparty_forwarding_info.clone()
1887 /// Returns an `HTLCStats` about inbound pending HTLCs
1888 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1890 let mut stats = HTLCStats {
1891 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1892 pending_htlcs_value_msat: 0,
1893 on_counterparty_tx_dust_exposure_msat: 0,
1894 on_holder_tx_dust_exposure_msat: 0,
1895 holding_cell_msat: 0,
1896 on_holder_tx_holding_cell_htlcs_count: 0,
1899 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1902 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1903 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1904 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1906 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1907 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1908 for ref htlc in context.pending_inbound_htlcs.iter() {
1909 stats.pending_htlcs_value_msat += htlc.amount_msat;
1910 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1911 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1913 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1914 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1920 /// Returns an `HTLCStats` about pending outbound HTLCs, *including* pending adds in our holding cell.
1921 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1923 let mut stats = HTLCStats {
1924 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1925 pending_htlcs_value_msat: 0,
1926 on_counterparty_tx_dust_exposure_msat: 0,
1927 on_holder_tx_dust_exposure_msat: 0,
1928 holding_cell_msat: 0,
1929 on_holder_tx_holding_cell_htlcs_count: 0,
1932 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1935 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1936 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1937 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1939 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1940 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1941 for ref htlc in context.pending_outbound_htlcs.iter() {
1942 stats.pending_htlcs_value_msat += htlc.amount_msat;
1943 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1944 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1946 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1947 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1951 for update in context.holding_cell_htlc_updates.iter() {
1952 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1953 stats.pending_htlcs += 1;
1954 stats.pending_htlcs_value_msat += amount_msat;
1955 stats.holding_cell_msat += amount_msat;
1956 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1957 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1959 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1960 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1962 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1969 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1970 /// Doesn't bother handling the
1971 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1972 /// corner case properly.
1973 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1974 -> AvailableBalances
1975 where F::Target: FeeEstimator
1977 let context = &self;
1978 // Note that we have to handle overflow due to the above case.
1979 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1980 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1982 let mut balance_msat = context.value_to_self_msat;
1983 for ref htlc in context.pending_inbound_htlcs.iter() {
1984 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1985 balance_msat += htlc.amount_msat;
1988 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1990 let outbound_capacity_msat = context.value_to_self_msat
1991 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1993 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1995 let mut available_capacity_msat = outbound_capacity_msat;
1997 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1998 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2002 if context.is_outbound() {
2003 // We should mind channel commit tx fee when computing how much of the available capacity
2004 // can be used in the next htlc. Mirrors the logic in send_htlc.
2006 // The fee depends on whether the amount we will be sending is above dust or not,
2007 // and the answer will in turn change the amount itself, making it a circular
2008 // dependency.
2009 // This complicates the computation around dust-values, up to the one-htlc-value.
2010 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2011 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2012 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2015 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2016 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2017 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2018 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2019 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2020 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2021 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2024 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2025 // value ends up being below dust, we have this fee available again. In that case,
2026 // match the value to right-below-dust.
2027 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2028 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2029 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2030 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2031 debug_assert!(one_htlc_difference_msat != 0);
2032 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2033 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2034 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2036 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2039 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2040 // sending a new HTLC won't reduce their balance below our reserve threshold.
2041 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2042 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2043 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2046 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2047 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2049 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2050 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2051 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2053 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2054 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2055 // we've selected for them, we can only send dust HTLCs.
2056 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2060 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2062 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2063 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2064 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2065 // send above the dust limit (as the router can always overpay to meet the dust limit).
2066 let mut remaining_msat_below_dust_exposure_limit = None;
2067 let mut dust_exposure_dust_limit_msat = 0;
2068 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2070 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2071 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2073 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2074 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2075 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2077 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2078 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2079 remaining_msat_below_dust_exposure_limit =
2080 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2081 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2084 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2085 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2086 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2087 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2088 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2089 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2092 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2093 if available_capacity_msat < dust_exposure_dust_limit_msat {
2094 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2096 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2100 available_capacity_msat = cmp::min(available_capacity_msat,
2101 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2103 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2104 available_capacity_msat = 0;
2108 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2109 - context.value_to_self_msat as i64
2110 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2111 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2113 outbound_capacity_msat,
2114 next_outbound_htlc_limit_msat: available_capacity_msat,
2115 next_outbound_htlc_minimum_msat,
2120 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2121 let context = &self;
2122 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2125 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2126 /// number of pending HTLCs that are on track to be in our next commitment tx.
2128 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2129 /// `fee_spike_buffer_htlc` is `Some`.
2131 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2132 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2134 /// Dust HTLCs are excluded.
2135 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2136 let context = &self;
2137 assert!(context.is_outbound());
2139 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2142 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2143 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2145 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2146 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2148 let mut addl_htlcs = 0;
2149 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2151 HTLCInitiator::LocalOffered => {
2152 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2156 HTLCInitiator::RemoteOffered => {
2157 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2163 let mut included_htlcs = 0;
2164 for ref htlc in context.pending_inbound_htlcs.iter() {
2165 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2168 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2169 // transaction including this HTLC if it times out before they RAA.
2170 included_htlcs += 1;
2173 for ref htlc in context.pending_outbound_htlcs.iter() {
2174 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2178 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2179 OutboundHTLCState::Committed => included_htlcs += 1,
2180 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2181 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2182 // transaction won't be generated until they send us their next RAA, which will mean
2183 // dropping any HTLCs in this state.
2188 for htlc in context.holding_cell_htlc_updates.iter() {
2190 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2191 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2196 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2197 // ack we're guaranteed to never include them in commitment txs anymore.
2201 let num_htlcs = included_htlcs + addl_htlcs;
2202 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
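// Illustrative sketch (hypothetical values, non-anchor channel): with one committed non-dust
// HTLC, a non-dust candidate HTLC and the fee spike buffer enabled, `num_htlcs` is 3
// (1 included + the candidate + the buffer HTLC), so at 2_500 sat/kW:
//
//     let fee_msat = (724u64 + 3 * 172) * 2_500 / 1000 * 1000;
//     assert_eq!(fee_msat, 3_100_000); // i.e. 3_100 sat, rounded down to whole satoshis first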
2203 #[cfg(any(test, fuzzing))]
2206 if fee_spike_buffer_htlc.is_some() {
2207 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2209 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2210 + context.holding_cell_htlc_updates.len();
2211 let commitment_tx_info = CommitmentTxInfoCached {
2213 total_pending_htlcs,
2214 next_holder_htlc_id: match htlc.origin {
2215 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2216 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2218 next_counterparty_htlc_id: match htlc.origin {
2219 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2220 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2222 feerate: context.feerate_per_kw,
2224 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2229 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2230 /// pending HTLCs that are on track to be in their next commitment tx
2232 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2233 /// `fee_spike_buffer_htlc` is `Some`.
2235 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2236 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2238 /// Dust HTLCs are excluded.
2239 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2240 let context = &self;
2241 assert!(!context.is_outbound());
2243 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2246 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2247 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2249 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2250 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2252 let mut addl_htlcs = 0;
2253 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2255 HTLCInitiator::LocalOffered => {
2256 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2260 HTLCInitiator::RemoteOffered => {
2261 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2267 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2268 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2269 // committed outbound HTLCs, see below.
2270 let mut included_htlcs = 0;
2271 for ref htlc in context.pending_inbound_htlcs.iter() {
2272 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2275 included_htlcs += 1;
2278 for ref htlc in context.pending_outbound_htlcs.iter() {
2279 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2282 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2283 // i.e. if they've responded to us with an RAA after announcement.
2285 OutboundHTLCState::Committed => included_htlcs += 1,
2286 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2287 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2292 let num_htlcs = included_htlcs + addl_htlcs;
2293 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2294 #[cfg(any(test, fuzzing))]
2297 if fee_spike_buffer_htlc.is_some() {
2298 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2300 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2301 let commitment_tx_info = CommitmentTxInfoCached {
2303 total_pending_htlcs,
2304 next_holder_htlc_id: match htlc.origin {
2305 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2306 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2308 next_counterparty_htlc_id: match htlc.origin {
2309 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2310 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2312 feerate: context.feerate_per_kw,
2314 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2319 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2320 where F: Fn() -> Option<O> {
2321 match self.channel_state {
2322 ChannelState::FundingNegotiated => f(),
2323 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2332 /// Returns the transaction if there is a pending funding transaction that is yet to be
2333 /// broadcast.
2334 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2335 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2338 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2339 /// broadcast.
2340 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2341 self.if_unbroadcasted_funding(||
2342 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2346 /// Returns whether the channel is funded in a batch.
2347 pub fn is_batch_funding(&self) -> bool {
2348 self.is_batch_funding.is_some()
2351 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2352 /// broadcast.
2353 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2354 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2357 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2358 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2359 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2360 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2361 /// immediately (others we will have to allow to time out).
2362 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2363 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2364 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2365 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2366 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2367 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2369 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2370 // return them to fail the payment.
2371 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2372 let counterparty_node_id = self.get_counterparty_node_id();
2373 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2375 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2376 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2381 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2382 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2383 // returning a channel monitor update here would imply a channel monitor update before
2384 // we even registered the channel monitor to begin with, which is invalid.
2385 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2386 // funding transaction, don't return a funding txo (which prevents providing the
2387 // monitor update to the user, even if we return one).
2388 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2389 let generate_monitor_update = match self.channel_state {
2390 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2393 if generate_monitor_update {
2394 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2395 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2396 update_id: self.latest_monitor_update_id,
2397 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2401 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2403 self.channel_state = ChannelState::ShutdownComplete;
2404 self.update_time_counter += 1;
2407 dropped_outbound_htlcs,
2408 unbroadcasted_batch_funding_txid,
2409 channel_id: self.channel_id,
2410 counterparty_node_id: self.counterparty_node_id,
2414 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2415 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2416 let counterparty_keys = self.build_remote_transaction_keys();
2417 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2419 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2420 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2421 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2422 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2424 match &self.holder_signer {
2425 // TODO (arik): move match into calling method for Taproot
2426 ChannelSignerType::Ecdsa(ecdsa) => {
2427 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2428 .map(|(signature, _)| msgs::FundingSigned {
2429 channel_id: self.channel_id(),
2432 partial_signature_with_nonce: None,
2436 if funding_signed.is_none() {
2437 #[cfg(not(async_signing))] {
2438 panic!("Failed to get signature for funding_signed");
2440 #[cfg(async_signing)] {
2441 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2442 self.signer_pending_funding = true;
2444 } else if self.signer_pending_funding {
2445 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2446 self.signer_pending_funding = false;
2449 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2450 (counterparty_initial_commitment_tx, funding_signed)
2452 // TODO (taproot|arik)
2459 // Internal utility functions for channels
2461 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2462 /// `channel_value_satoshis` in msat, set through
2463 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2465 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2467 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2468 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2469 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2471 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2474 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2476 channel_value_satoshis * 10 * configured_percent
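// Illustrative sketch (hypothetical values): the result is in msat, so `* 10 * percent` is the
// same as `* 1000 * percent / 100`.
//
//     let channel_value_satoshis: u64 = 1_000_000; // hypothetical
//     let configured_percent: u64 = 10;            // hypothetical config value, clamped to 1..=100
//     assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000); // msat, 10% of capacity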
2479 /// Returns a minimum channel reserve value the remote needs to maintain,
2480 /// required by us according to the configured or default
2481 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2483 /// Guaranteed to return a value no larger than channel_value_satoshis
2485 /// This is used both for outbound and inbound channels and has lower bound
2486 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2487 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2488 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2489 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
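// Illustrative sketch (hypothetical values): the proportional reserve is computed in millionths
// of the channel value and then clamped to [MIN_THEIR_CHAN_RESERVE_SATOSHIS, channel_value_satoshis].
//
//     let channel_value_satoshis: u64 = 1_000_000;   // hypothetical
//     let proportional_millionths: u64 = 10_000;     // hypothetical config: 1%
//     let calculated = channel_value_satoshis.saturating_mul(proportional_millionths) / 1_000_000;
//     assert_eq!(calculated, 10_000);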
2492 /// This is for legacy reasons, present for forward-compatibility.
2493 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2494 /// from storage. Hence, we use this function to not persist default values of
2495 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2496 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2497 let (q, _) = channel_value_satoshis.overflowing_div(100);
2498 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2501 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2502 // Note that num_htlcs should not include dust HTLCs.
2504 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2505 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2508 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2509 // Note that num_htlcs should not include dust HTLCs.
2510 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2511 // Note that we need to divide before multiplying to round properly,
2512 // since the lowest denomination of bitcoin on-chain is the satoshi.
2513 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
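// Illustrative sketch (hypothetical values, non-anchor channel): dividing by 1000 before
// multiplying back up rounds the fee down to a whole number of satoshis.
//
//     let weight = 724u64 + 2 * 172;                 // hypothetical: two non-dust HTLCs
//     let feerate_per_kw = 253u64;                   // hypothetical feerate
//     assert_eq!(weight * feerate_per_kw / 1000 * 1000, 270_000); // rounded down to whole sats
//     assert_eq!(weight * feerate_per_kw, 270_204);  // without the early division we'd keep sub-satoshi msats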
2516 // Holder designates channel data owned for the benefit of the user client.
2517 // Counterparty designates channel data owned by the other channel participant entity.
2518 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2519 pub context: ChannelContext<SP>,
2522 #[cfg(any(test, fuzzing))]
2523 struct CommitmentTxInfoCached {
2525 total_pending_htlcs: usize,
2526 next_holder_htlc_id: u64,
2527 next_counterparty_htlc_id: u64,
2531 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2532 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2533 trait FailHTLCContents {
2534 type Message: FailHTLCMessageName;
2535 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2536 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2537 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2539 impl FailHTLCContents for msgs::OnionErrorPacket {
2540 type Message = msgs::UpdateFailHTLC;
2541 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2542 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2544 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2545 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2547 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2548 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2551 impl FailHTLCContents for (u16, [u8; 32]) {
2552 type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
2553 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2554 msgs::UpdateFailMalformedHTLC {
2557 failure_code: self.0,
2558 sha256_of_onion: self.1
2561 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2562 InboundHTLCState::LocalRemoved(
2563 InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
2566 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2567 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2569 failure_code: self.0,
2570 sha256_of_onion: self.1
2575 trait FailHTLCMessageName {
2576 fn name() -> &'static str;
2578 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2579 fn name() -> &'static str {
2583 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2584 fn name() -> &'static str {
2585 "update_fail_malformed_htlc"
2589 impl<SP: Deref> Channel<SP> where
2590 SP::Target: SignerProvider,
2591 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2593 fn check_remote_fee<F: Deref, L: Deref>(
2594 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2595 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2596 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2598 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2599 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2601 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2603 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2604 if feerate_per_kw < lower_limit {
2605 if let Some(cur_feerate) = cur_feerate_per_kw {
2606 if feerate_per_kw > cur_feerate {
2608 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2609 cur_feerate, feerate_per_kw);
2613 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2619 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2620 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2621 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2622 // outside of those situations will fail.
2623 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2627 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2632 1 + // script length (0)
2636 )*4 + // * 4 for non-witness parts
2637 2 + // witness marker and flag
2638 1 + // witness element count
2639 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2640 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2641 2*(1 + 71); // two signatures + sighash type flags
2642 if let Some(spk) = a_scriptpubkey {
2643 ret += ((8+1) + // output values and script length
2644 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2646 if let Some(spk) = b_scriptpubkey {
2647 ret += ((8+1) + // output values and script length
2648 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
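// Illustrative note (editor's addition): each retained output adds (8 value bytes + 1
// script-length byte + the scriptpubkey length) * 4 weight units, since outputs are non-witness
// data. For a 22-byte P2WPKH scriptpubkey that is (8 + 1 + 22) * 4 = 124 WU.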
2654 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2655 assert!(self.context.pending_inbound_htlcs.is_empty());
2656 assert!(self.context.pending_outbound_htlcs.is_empty());
2657 assert!(self.context.pending_update_fee.is_none());
2659 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2660 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2661 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2663 if value_to_holder < 0 {
2664 assert!(self.context.is_outbound());
2665 total_fee_satoshis += (-value_to_holder) as u64;
2666 } else if value_to_counterparty < 0 {
2667 assert!(!self.context.is_outbound());
2668 total_fee_satoshis += (-value_to_counterparty) as u64;
2671 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2672 value_to_counterparty = 0;
2675 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2676 value_to_holder = 0;
2679 assert!(self.context.shutdown_scriptpubkey.is_some());
2680 let holder_shutdown_script = self.get_closing_scriptpubkey();
2681 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2682 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2684 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2685 (closing_transaction, total_fee_satoshis)
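// Worked example (illustrative, numbers assumed): for a 100_000 sat channel with
// value_to_self_msat = 60_000_000 where we are the outbound (fee-paying) side and propose a
// 500 sat fee, value_to_holder = 60_000 - 500 = 59_500 sats and value_to_counterparty =
// 40_000 sats; either output is dropped entirely if it falls at or below our dust limit (or if
// `skip_remote_output` is set, for the counterparty's output).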
2688 fn funding_outpoint(&self) -> OutPoint {
2689 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2692 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] entirely.
2695 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2696 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2698 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is disconnected).
2700 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2701 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2702 where L::Target: Logger {
2703 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2704 // (see equivalent if condition there).
2705 assert!(self.context.channel_state.should_force_holding_cell());
2706 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2707 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2708 self.context.latest_monitor_update_id = mon_update_id;
2709 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2710 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
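// Usage sketch (editor's illustration): a caller that learns a preimage while the peer is
// disconnected first applies the preimage to the ChannelMonitor out-of-band and then calls
// `claim_htlc_while_disconnected_dropping_mon_update(htlc_id, preimage, &logger)`; the claim
// then sits in the holding cell until the peer reconnects and the holding cell is freed.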
2714 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2715 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2716 // caller thought we could have something claimed (since we wouldn't have accepted an
2717 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us.
2719 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2720 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2723 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2724 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2725 // these, but for now we just have to treat them as normal.
2727 let mut pending_idx = core::usize::MAX;
2728 let mut htlc_value_msat = 0;
2729 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2730 if htlc.htlc_id == htlc_id_arg {
2731 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2732 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2733 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2735 InboundHTLCState::Committed => {},
2736 InboundHTLCState::LocalRemoved(ref reason) => {
2737 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2739 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2740 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2742 return UpdateFulfillFetch::DuplicateClaim {};
2745 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2746 // Don't return in release mode here so that we can update channel_monitor
2750 htlc_value_msat = htlc.amount_msat;
2754 if pending_idx == core::usize::MAX {
2755 #[cfg(any(test, fuzzing))]
2756 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2757 // this is simply a duplicate claim, not previously failed and we lost funds.
2758 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2759 return UpdateFulfillFetch::DuplicateClaim {};
2762 // Now update local state:
2764 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2765 // can claim it even if the channel hits the chain before we see their next commitment.
2766 self.context.latest_monitor_update_id += 1;
2767 let monitor_update = ChannelMonitorUpdate {
2768 update_id: self.context.latest_monitor_update_id,
2769 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2770 payment_preimage: payment_preimage_arg.clone(),
2774 if self.context.channel_state.should_force_holding_cell() {
2775 // Note that this condition is the same as the assertion in
2776 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2777 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2778 // do not get into this branch.
2779 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2780 match pending_update {
2781 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2782 if htlc_id_arg == htlc_id {
2783 // Make sure we don't leave latest_monitor_update_id incremented here:
2784 self.context.latest_monitor_update_id -= 1;
2785 #[cfg(any(test, fuzzing))]
2786 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2787 return UpdateFulfillFetch::DuplicateClaim {};
2790 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2791 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2793 if htlc_id_arg == htlc_id {
2794 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2795 // TODO: We may actually be able to switch to a fulfill here, though it's
2796 // rare enough it may not be worth the complexity burden.
2797 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2798 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2804 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2805 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2806 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2808 #[cfg(any(test, fuzzing))]
2809 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2810 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2812 #[cfg(any(test, fuzzing))]
2813 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2816 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2817 if let InboundHTLCState::Committed = htlc.state {
2819 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2820 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2822 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2823 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2826 UpdateFulfillFetch::NewClaim {
2829 msg: Some(msgs::UpdateFulfillHTLC {
2830 channel_id: self.context.channel_id(),
2831 htlc_id: htlc_id_arg,
2832 payment_preimage: payment_preimage_arg,
2837 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2838 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2839 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2840 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2841 // Even if we aren't supposed to let new monitor updates with commitment state
2842 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2843 // matter what. Sadly, to push a new monitor update which flies before others
2844 // already queued, we have to insert it into the pending queue and update the
2845 // update_ids of all the following monitors.
2846 if release_cs_monitor && msg.is_some() {
2847 let mut additional_update = self.build_commitment_no_status_check(logger);
2848 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2849 // to be strictly increasing by one, so decrement it here.
2850 self.context.latest_monitor_update_id = monitor_update.update_id;
2851 monitor_update.updates.append(&mut additional_update.updates);
2853 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2854 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2855 monitor_update.update_id = new_mon_id;
2856 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2857 held_update.update.update_id += 1;
2860 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2861 let update = self.build_commitment_no_status_check(logger);
2862 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2868 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2869 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2871 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2875 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2876 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2877 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2878 /// before we fail backwards.
2880 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2881 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2882 /// [`ChannelError::Ignore`].
2883 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2884 -> Result<(), ChannelError> where L::Target: Logger {
2885 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2886 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2889 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2890 /// want to fail blinded HTLCs where we are not the intro node.
2892 /// See [`Self::queue_fail_htlc`] for more info.
2893 pub fn queue_fail_malformed_htlc<L: Deref>(
2894 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2895 ) -> Result<(), ChannelError> where L::Target: Logger {
2896 self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
2897 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2900 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2901 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2902 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2903 /// before we fail backwards.
2905 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2906 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2907 /// [`ChannelError::Ignore`].
2908 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2909 &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
2911 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2912 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2913 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2916 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2917 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2918 // these, but for now we just have to treat them as normal.
2920 let mut pending_idx = core::usize::MAX;
2921 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2922 if htlc.htlc_id == htlc_id_arg {
2924 InboundHTLCState::Committed => {},
2925 InboundHTLCState::LocalRemoved(ref reason) => {
2926 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2928 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2933 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2934 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2940 if pending_idx == core::usize::MAX {
2941 #[cfg(any(test, fuzzing))]
2942 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2943 // is simply a duplicate fail, not previously failed and we failed-back too early.
2944 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2948 if self.context.channel_state.should_force_holding_cell() {
2949 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2950 force_holding_cell = true;
2953 // Now update local state:
2954 if force_holding_cell {
2955 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2956 match pending_update {
2957 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2958 if htlc_id_arg == htlc_id {
2959 #[cfg(any(test, fuzzing))]
2960 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2964 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2965 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2967 if htlc_id_arg == htlc_id {
2968 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2969 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2975 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2976 self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
2980 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2981 E::Message::name(), &self.context.channel_id());
2983 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2984 htlc.state = err_packet.clone().to_inbound_htlc_state();
2987 Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
2990 // Message handlers:
2991 /// Updates the state of the channel to indicate that all channels in the batch have received
2992 /// funding_signed and persisted their monitors.
2993 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2994 /// treated as a non-batch channel going forward.
2995 pub fn set_batch_ready(&mut self) {
2996 self.context.is_batch_funding = None;
2997 self.context.channel_state.clear_waiting_for_batch();
3000 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3001 /// and the channel is now usable (and public), this may generate an announcement_signatures to send in reply.
3003 pub fn channel_ready<NS: Deref, L: Deref>(
3004 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3005 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3006 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3008 NS::Target: NodeSigner,
3011 if self.context.channel_state.is_peer_disconnected() {
3012 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3013 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3016 if let Some(scid_alias) = msg.short_channel_id_alias {
3017 if Some(scid_alias) != self.context.short_channel_id {
3018 // The scid alias provided can be used to route payments *from* our counterparty,
3019 // i.e. can be used for inbound payments and provided in invoices, but is not used
3020 // when routing outbound payments.
3021 self.context.latest_inbound_scid_alias = Some(scid_alias);
3025 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3026 // batch, but we can receive channel_ready messages.
3027 let mut check_reconnection = false;
3028 match &self.context.channel_state {
3029 ChannelState::AwaitingChannelReady(flags) => {
3030 let flags = *flags & !FundedStateFlags::ALL;
3031 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3032 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3033 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3034 check_reconnection = true;
3035 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3036 self.context.channel_state.set_their_channel_ready();
3037 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3038 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3039 self.context.update_time_counter += 1;
3041 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3042 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3045 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3046 ChannelState::ChannelReady(_) => check_reconnection = true,
3047 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3049 if check_reconnection {
3050 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3051 // required, or they're sending a fresh SCID alias.
3052 let expected_point =
3053 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3054 // If they haven't ever sent an updated point, the point they send should match the current one.
3056 self.context.counterparty_cur_commitment_point
3057 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3058 // If we've advanced the commitment number once, the second commitment point is
3059 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3060 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3061 self.context.counterparty_prev_commitment_point
3063 // If they have sent updated points, channel_ready is always supposed to match
3064 // their "first" point, which we re-derive here.
3065 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3066 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3067 ).expect("We already advanced, so previous secret keys should have been validated already")))
3069 if expected_point != Some(msg.next_per_commitment_point) {
3070 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3075 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3076 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3078 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3080 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3083 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3084 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3085 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3086 ) -> Result<(), ChannelError>
3087 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3088 FE::Target: FeeEstimator, L::Target: Logger,
3090 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3091 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3093 // We can't accept HTLCs sent after we've sent a shutdown.
3094 if self.context.channel_state.is_local_shutdown_sent() {
3095 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3097 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3098 if self.context.channel_state.is_remote_shutdown_sent() {
3099 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3101 if self.context.channel_state.is_peer_disconnected() {
3102 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3104 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3105 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3107 if msg.amount_msat == 0 {
3108 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3110 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3111 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3114 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3115 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3116 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3117 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3119 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3120 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3123 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3124 // the reserve_satoshis we told them to always have as direct payment so that they lose
3125 // something if we punish them for broadcasting an old state).
3126 // Note that we don't really care about having a small/no to_remote output in our local
3127 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3128 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3129 // present in the next commitment transaction we send them (at least for fulfilled ones,
3130 // failed ones won't modify value_to_self).
3131 // Note that we will send HTLCs which another instance of rust-lightning would think
3132 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3133 // Channel state once they will not be present in the next received commitment transaction).
3135 let mut removed_outbound_total_msat = 0;
3136 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3137 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3138 removed_outbound_total_msat += htlc.amount_msat;
3139 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3140 removed_outbound_total_msat += htlc.amount_msat;
3144 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3145 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3148 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3149 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3150 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3152 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3153 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3154 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3155 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3156 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3157 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3158 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3162 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3163 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3164 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3165 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3166 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3167 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3168 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
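// Illustrative numbers (editor's sketch; assumes the BOLT 3 HTLC-success weight of 703 WU on
// non-anchor channels): with a dust buffer feerate of 2500 sat/kW, htlc_success_dust_limit is
// roughly 2500 * 703 / 1000 = 1757 sats, so an incoming HTLC below
// holder_dust_limit_satoshis + 1757 sats counts toward our on-holder-tx dust exposure and is
// failed back (0x1000|7, temporary_channel_failure) once the configured max dust exposure
// would be exceeded.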
3172 let pending_value_to_self_msat =
3173 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3174 let pending_remote_value_msat =
3175 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3176 if pending_remote_value_msat < msg.amount_msat {
3177 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3180 // Check that the remote can afford to pay for this HTLC on-chain at the current
3181 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3183 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3184 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3185 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3187 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3188 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3192 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3193 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3195 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3196 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3200 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3201 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3205 if !self.context.is_outbound() {
3206 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3207 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3208 // side, only on the sender's. Note that with anchor outputs we are no longer as
3209 // sensitive to fee spikes, so the extra buffer multiple below is only applied on non-anchor channels.
3210 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3211 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3212 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3213 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
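// (Editor's note: the buffer multiple inflates the estimated commitment fee so the
// counterparty can still afford this HTLC if the feerate rises; it is skipped on anchor
// channels, which are less feerate-sensitive.)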
3215 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3216 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3217 // the HTLC, i.e. its status is already set to failing.
3218 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3219 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3222 // Check that they won't violate our local required channel reserve by adding this HTLC.
3223 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3224 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3225 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3226 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3229 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3230 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3232 if msg.cltv_expiry >= 500000000 {
3233 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3236 if self.context.channel_state.is_local_shutdown_sent() {
3237 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3238 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3242 // Now update local state:
3243 self.context.next_counterparty_htlc_id += 1;
3244 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3245 htlc_id: msg.htlc_id,
3246 amount_msat: msg.amount_msat,
3247 payment_hash: msg.payment_hash,
3248 cltv_expiry: msg.cltv_expiry,
3249 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3254 /// Marks an outbound HTLC as removed once we have received an update_fail/fulfill/malformed message for it.
3256 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3257 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3258 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3259 if htlc.htlc_id == htlc_id {
3260 let outcome = match check_preimage {
3261 None => fail_reason.into(),
3262 Some(payment_preimage) => {
3263 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3264 if payment_hash != htlc.payment_hash {
3265 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3267 OutboundHTLCOutcome::Success(Some(payment_preimage))
3271 OutboundHTLCState::LocalAnnounced(_) =>
3272 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3273 OutboundHTLCState::Committed => {
3274 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3276 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3277 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3282 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3285 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3286 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3287 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3289 if self.context.channel_state.is_peer_disconnected() {
3290 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3293 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3296 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3297 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3298 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3300 if self.context.channel_state.is_peer_disconnected() {
3301 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3304 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3308 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3309 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3310 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3312 if self.context.channel_state.is_peer_disconnected() {
3313 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3316 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3320 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3321 where L::Target: Logger
3323 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3324 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3326 if self.context.channel_state.is_peer_disconnected() {
3327 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3329 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3330 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3333 let funding_script = self.context.get_funding_redeemscript();
3335 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3337 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3338 let commitment_txid = {
3339 let trusted_tx = commitment_stats.tx.trust();
3340 let bitcoin_tx = trusted_tx.built_transaction();
3341 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3343 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3344 log_bytes!(msg.signature.serialize_compact()[..]),
3345 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3346 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3347 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3348 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3352 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3354 // If our counterparty updated the channel fee in this commitment transaction, check that
3355 // they can actually afford the new fee now.
3356 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3357 update_state == FeeUpdateState::RemoteAnnounced
3360 debug_assert!(!self.context.is_outbound());
3361 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3362 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3363 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3366 #[cfg(any(test, fuzzing))]
3368 if self.context.is_outbound() {
3369 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3370 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3371 if let Some(info) = projected_commit_tx_info {
3372 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3373 + self.context.holding_cell_htlc_updates.len();
3374 if info.total_pending_htlcs == total_pending_htlcs
3375 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3376 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3377 && info.feerate == self.context.feerate_per_kw {
3378 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3384 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3385 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3388 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3389 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3390 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3391 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3392 // backwards compatibility, we never use it in production. To provide test coverage, here,
3393 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3394 #[allow(unused_assignments, unused_mut)]
3395 let mut separate_nondust_htlc_sources = false;
3396 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3397 use core::hash::{BuildHasher, Hasher};
3398 // Get a random value using the only std API to do so - the DefaultHasher
3399 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3400 separate_nondust_htlc_sources = rand_val % 2 == 0;
3403 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3404 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3405 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3406 if let Some(_) = htlc.transaction_output_index {
3407 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3408 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3409 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3411 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3412 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3413 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3414 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3415 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3416 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3417 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3418 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3420 if !separate_nondust_htlc_sources {
3421 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3424 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3426 if separate_nondust_htlc_sources {
3427 if let Some(source) = source_opt.take() {
3428 nondust_htlc_sources.push(source);
3431 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3434 let holder_commitment_tx = HolderCommitmentTransaction::new(
3435 commitment_stats.tx,
3437 msg.htlc_signatures.clone(),
3438 &self.context.get_holder_pubkeys().funding_pubkey,
3439 self.context.counterparty_funding_pubkey()
3442 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3443 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3445 // Update state now that we've passed all the can-fail calls...
3446 let mut need_commitment = false;
3447 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3448 if *update_state == FeeUpdateState::RemoteAnnounced {
3449 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3450 need_commitment = true;
3454 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3455 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3456 Some(forward_info.clone())
3458 if let Some(forward_info) = new_forward {
3459 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3460 &htlc.payment_hash, &self.context.channel_id);
3461 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3462 need_commitment = true;
3465 let mut claimed_htlcs = Vec::new();
3466 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3467 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3468 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3469 &htlc.payment_hash, &self.context.channel_id);
3470 // Grab the preimage, if it exists, instead of cloning
3471 let mut reason = OutboundHTLCOutcome::Success(None);
3472 mem::swap(outcome, &mut reason);
3473 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3474 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3475 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3476 // have a `Success(None)` reason. In this case we could forget some HTLC
3477 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3478 // fixes a bug which the user was exposed to on 0.0.104 when they started the claim.
3480 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3482 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3483 need_commitment = true;
3487 self.context.latest_monitor_update_id += 1;
3488 let mut monitor_update = ChannelMonitorUpdate {
3489 update_id: self.context.latest_monitor_update_id,
3490 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3491 commitment_tx: holder_commitment_tx,
3492 htlc_outputs: htlcs_and_sigs,
3494 nondust_htlc_sources,
3498 self.context.cur_holder_commitment_transaction_number -= 1;
3499 self.context.expecting_peer_commitment_signed = false;
3500 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3501 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3502 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3504 if self.context.channel_state.is_monitor_update_in_progress() {
3505 // In case we initially failed monitor updating without requiring a response, we need
3506 // to make sure the RAA gets sent first.
3507 self.context.monitor_pending_revoke_and_ack = true;
3508 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3509 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3510 // the corresponding HTLC status updates so that
3511 // get_last_commitment_update_for_send includes the right HTLCs.
3512 self.context.monitor_pending_commitment_signed = true;
3513 let mut additional_update = self.build_commitment_no_status_check(logger);
3514 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3515 // strictly increasing by one, so decrement it here.
3516 self.context.latest_monitor_update_id = monitor_update.update_id;
3517 monitor_update.updates.append(&mut additional_update.updates);
3519 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3520 &self.context.channel_id);
3521 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3524 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3525 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3526 // we'll send one right away when we get the revoke_and_ack when we
3527 // free_holding_cell_htlcs().
3528 let mut additional_update = self.build_commitment_no_status_check(logger);
3529 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3530 // strictly increasing by one, so decrement it here.
3531 self.context.latest_monitor_update_id = monitor_update.update_id;
3532 monitor_update.updates.append(&mut additional_update.updates);
3536 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3537 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3538 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3539 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3542 /// Public version of the below, checking relevant preconditions first.
3543 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3544 /// returns `(None, Vec::new())`.
3545 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3546 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3547 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3548 where F::Target: FeeEstimator, L::Target: Logger
3550 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3551 self.free_holding_cell_htlcs(fee_estimator, logger)
3552 } else { (None, Vec::new()) }
3555 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3556 /// for our counterparty.
3557 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3558 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3559 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3560 where F::Target: FeeEstimator, L::Target: Logger
3562 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3563 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3564 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3565 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3567 let mut monitor_update = ChannelMonitorUpdate {
3568 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3569 updates: Vec::new(),
3572 let mut htlc_updates = Vec::new();
3573 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3574 let mut update_add_count = 0;
3575 let mut update_fulfill_count = 0;
3576 let mut update_fail_count = 0;
3577 let mut htlcs_to_fail = Vec::new();
3578 for htlc_update in htlc_updates.drain(..) {
3579 // Note that this *can* fail, though it should be due to rather-rare conditions on
3580 // fee races with adding too many outputs which push our total payments just over
3581 // the limit. In case it's less rare than I anticipate, we may want to revisit
3582 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3583 // to rebalance channels.
3584 match &htlc_update {
3585 &HTLCUpdateAwaitingACK::AddHTLC {
3586 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3587 skimmed_fee_msat, blinding_point, ..
3589 match self.send_htlc(
3590 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3591 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3593 Ok(_) => update_add_count += 1,
3596 ChannelError::Ignore(ref msg) => {
3597 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3598 // If we fail to send here, then this HTLC should
3599 // be failed backwards. Failing to send here
3600 // indicates that this HTLC may keep being put back
3601 // into the holding cell without ever being
3602 // successfully forwarded/failed/fulfilled, causing
3603 // our counterparty to eventually close on us.
3604 htlcs_to_fail.push((source.clone(), *payment_hash));
3607 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3613 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3614 // If an HTLC claim was previously added to the holding cell (via
3615 // `get_update_fulfill_htlc`), then generating the claim message itself must
3616 // not fail - any in between attempts to claim the HTLC will have resulted
3617 // in it hitting the holding cell again and we cannot change the state of a
3618 // holding cell HTLC from fulfill to anything else.
3619 let mut additional_monitor_update =
3620 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3621 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3622 { monitor_update } else { unreachable!() };
3623 update_fulfill_count += 1;
3624 monitor_update.updates.append(&mut additional_monitor_update.updates);
3626 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3627 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3628 Ok(update_fail_msg_option) => {
3629 // If an HTLC failure was previously added to the holding cell (via
3630 // `queue_fail_htlc`) then generating the fail message itself must
3631 // not fail - we should never end up in a state where we double-fail
3632 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3633 // for a full revocation before failing.
3634 debug_assert!(update_fail_msg_option.is_some());
3635 update_fail_count += 1;
3638 if let ChannelError::Ignore(_) = e {}
3640 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3645 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3646 match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
3647 Ok(update_fail_malformed_opt) => {
3648 debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
3649 update_fail_count += 1;
3652 if let ChannelError::Ignore(_) = e {}
3654 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3661 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3662 return (None, htlcs_to_fail);
3664 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3665 self.send_update_fee(feerate, false, fee_estimator, logger)
3670 let mut additional_update = self.build_commitment_no_status_check(logger);
3671 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3672 // but we want them to be strictly increasing by one, so reset it here.
3673 self.context.latest_monitor_update_id = monitor_update.update_id;
3674 monitor_update.updates.append(&mut additional_update.updates);
3676 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3677 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3678 update_add_count, update_fulfill_count, update_fail_count);
3680 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3681 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3687 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3688 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3689 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3690 /// generating an appropriate error *after* the channel state has been updated based on the
3691 /// revoke_and_ack message.
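///
/// A rough, non-compiled sketch of how a caller might drive this method (the `channel`,
/// `msg`, `fee_estimator` and `logger` bindings are assumed to exist on the caller's side):
///
/// ```ignore
/// match channel.revoke_and_ack(&msg, &fee_estimator, &logger, false) {
///     Ok((htlcs_to_fail, monitor_update_opt)) => {
///         // `htlcs_to_fail` are holding-cell HTLCs which could not be forwarded and must be
///         // failed backwards; `monitor_update_opt`, if present, must be handed to the chain
///         // watcher before making further progress on this channel.
///     },
///     Err(ChannelError::Close(reason)) => {
///         // An unrecoverable protocol violation; the channel should be force-closed.
///     },
///     Err(_) => { /* warnings and ignorable errors are relayed to the peer as appropriate */ },
/// }
/// ```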
3692 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3693 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3694 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3695 where F::Target: FeeEstimator, L::Target: Logger,
3697 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3698 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3700 if self.context.channel_state.is_peer_disconnected() {
3701 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3703 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3704 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3707 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3709 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3710 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3711 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3715 if !self.context.channel_state.is_awaiting_remote_revoke() {
3716 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3717 // haven't given them a new commitment transaction to broadcast). We should probably
3718 // take advantage of this by updating our channel monitor, sending them an error, and
3719 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3720 // lot of work, and there's some chance this is all a misunderstanding anyway.
3721 // We have to do *something*, though, since our signer may get mad at us for otherwise
3722 // jumping a remote commitment number, so best to just force-close and move on.
3723 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3726 #[cfg(any(test, fuzzing))]
3728 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3729 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3732 match &self.context.holder_signer {
3733 ChannelSignerType::Ecdsa(ecdsa) => {
3734 ecdsa.validate_counterparty_revocation(
3735 self.context.cur_counterparty_commitment_transaction_number + 1,
3737 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3739 // TODO (taproot|arik)
3744 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3745 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3746 self.context.latest_monitor_update_id += 1;
3747 let mut monitor_update = ChannelMonitorUpdate {
3748 update_id: self.context.latest_monitor_update_id,
3749 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3750 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3751 secret: msg.per_commitment_secret,
3755 // Update state now that we've passed all the can-fail calls...
3756 // (note that we may still fail to generate the new commitment_signed message, but that's
3757 // OK, we step the channel here and *then* if the new generation fails we can fail the
3758 // channel based on that, but stepping stuff here should be safe either way.)
3759 self.context.channel_state.clear_awaiting_remote_revoke();
3760 self.context.sent_message_awaiting_response = None;
3761 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3762 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3763 self.context.cur_counterparty_commitment_transaction_number -= 1;
3765 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3766 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3769 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3770 let mut to_forward_infos = Vec::new();
3771 let mut revoked_htlcs = Vec::new();
3772 let mut finalized_claimed_htlcs = Vec::new();
3773 let mut update_fail_htlcs = Vec::new();
3774 let mut update_fail_malformed_htlcs = Vec::new();
3775 let mut require_commitment = false;
3776 let mut value_to_self_msat_diff: i64 = 0;
3779 // Take references explicitly so that we can hold multiple references to self.context.
3780 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3781 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3782 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3784 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3785 pending_inbound_htlcs.retain(|htlc| {
3786 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3787 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3788 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3789 value_to_self_msat_diff += htlc.amount_msat as i64;
3791 *expecting_peer_commitment_signed = true;
3795 pending_outbound_htlcs.retain(|htlc| {
3796 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3797 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3798 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3799 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3801 finalized_claimed_htlcs.push(htlc.source.clone());
3802 // They fulfilled, so we sent them money
3803 value_to_self_msat_diff -= htlc.amount_msat as i64;
3808 for htlc in pending_inbound_htlcs.iter_mut() {
3809 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3811 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3815 let mut state = InboundHTLCState::Committed;
3816 mem::swap(&mut state, &mut htlc.state);
3818 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3819 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3820 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3821 require_commitment = true;
3822 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3823 match forward_info {
3824 PendingHTLCStatus::Fail(fail_msg) => {
3825 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3826 require_commitment = true;
3828 HTLCFailureMsg::Relay(msg) => {
3829 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3830 update_fail_htlcs.push(msg)
3832 HTLCFailureMsg::Malformed(msg) => {
3833 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3834 update_fail_malformed_htlcs.push(msg)
3838 PendingHTLCStatus::Forward(forward_info) => {
3839 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3840 to_forward_infos.push((forward_info, htlc.htlc_id));
3841 htlc.state = InboundHTLCState::Committed;
3847 for htlc in pending_outbound_htlcs.iter_mut() {
3848 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3849 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3850 htlc.state = OutboundHTLCState::Committed;
3851 *expecting_peer_commitment_signed = true;
3853 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3854 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3855 // Grab the preimage, if it exists, instead of cloning
3856 let mut reason = OutboundHTLCOutcome::Success(None);
3857 mem::swap(outcome, &mut reason);
3858 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3859 require_commitment = true;
3863 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3865 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3866 match update_state {
3867 FeeUpdateState::Outbound => {
3868 debug_assert!(self.context.is_outbound());
3869 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3870 self.context.feerate_per_kw = feerate;
3871 self.context.pending_update_fee = None;
3872 self.context.expecting_peer_commitment_signed = true;
3874 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3875 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3876 debug_assert!(!self.context.is_outbound());
3877 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3878 require_commitment = true;
3879 self.context.feerate_per_kw = feerate;
3880 self.context.pending_update_fee = None;
3885 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3886 let release_state_str =
3887 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3888 macro_rules! return_with_htlcs_to_fail {
3889 ($htlcs_to_fail: expr) => {
3890 if !release_monitor {
3891 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3892 update: monitor_update,
3894 return Ok(($htlcs_to_fail, None));
3896 return Ok(($htlcs_to_fail, Some(monitor_update)));
3901 if self.context.channel_state.is_monitor_update_in_progress() {
3902 // We can't actually generate a new commitment transaction (incl by freeing holding
3903 // cells) while we can't update the monitor, so we just return what we have.
3904 if require_commitment {
3905 self.context.monitor_pending_commitment_signed = true;
3906 // When the monitor updating is restored we'll call
3907 // get_last_commitment_update_for_send(), which does not update state, but we're
3908 // definitely now awaiting a remote revoke before we can step forward any more, so set it here.
3910 let mut additional_update = self.build_commitment_no_status_check(logger);
3911 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3912 // strictly increasing by one, so decrement it here.
3913 self.context.latest_monitor_update_id = monitor_update.update_id;
3914 monitor_update.updates.append(&mut additional_update.updates);
3916 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3917 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3918 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3919 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3920 return_with_htlcs_to_fail!(Vec::new());
3923 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3924 (Some(mut additional_update), htlcs_to_fail) => {
3925 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3926 // strictly increasing by one, so decrement it here.
3927 self.context.latest_monitor_update_id = monitor_update.update_id;
3928 monitor_update.updates.append(&mut additional_update.updates);
3930 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3931 &self.context.channel_id(), release_state_str);
3933 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3934 return_with_htlcs_to_fail!(htlcs_to_fail);
3936 (None, htlcs_to_fail) => {
3937 if require_commitment {
3938 let mut additional_update = self.build_commitment_no_status_check(logger);
3940 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3941 // strictly increasing by one, so decrement it here.
3942 self.context.latest_monitor_update_id = monitor_update.update_id;
3943 monitor_update.updates.append(&mut additional_update.updates);
3945 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3946 &self.context.channel_id(),
3947 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3950 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3951 return_with_htlcs_to_fail!(htlcs_to_fail);
3953 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3954 &self.context.channel_id(), release_state_str);
3956 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3957 return_with_htlcs_to_fail!(htlcs_to_fail);
3963 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3964 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3965 /// commitment update.
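///
/// An illustrative sketch only (the `channel`, `fee_estimator` and `logger` bindings are
/// assumed; see above for the follow-up call that actually sends the update):
///
/// ```ignore
/// // Queue a fee update at 2_500 sat/kW. Nothing is sent to the peer yet; the update sits
/// // in the holding cell until `Self::maybe_free_holding_cell_htlcs` is called.
/// channel.queue_update_fee(2_500, &fee_estimator, &logger);
/// ```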
3966 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3967 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3968 where F::Target: FeeEstimator, L::Target: Logger
3970 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3971 assert!(msg_opt.is_none(), "We forced holding cell?");
3974 /// Adds a pending update to this channel. See the doc for send_htlc for
3975 /// further details on when the return value will be `None`.
3976 /// If our balance is too low to cover the cost of the next commitment transaction at the
3977 /// new feerate, the update is cancelled.
3979 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3980 /// [`Channel`] if `force_holding_cell` is false.
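///
/// A loose illustration (not compiled) of how the returned `Option` is consumed; the caller
/// plumbing and the `new_feerate` binding are assumed:
///
/// ```ignore
/// if let Some(update_fee_msg) = self.send_update_fee(new_feerate, false, fee_estimator, logger) {
///     // The update can be sent immediately and must be followed by a commitment_signed.
/// } else {
///     // The update landed in the holding cell, or was cancelled because we could not afford
///     // the new feerate or would exceed our max dust HTLC exposure.
/// }
/// ```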
3981 fn send_update_fee<F: Deref, L: Deref>(
3982 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3983 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3984 ) -> Option<msgs::UpdateFee>
3985 where F::Target: FeeEstimator, L::Target: Logger
3987 if !self.context.is_outbound() {
3988 panic!("Cannot send fee from inbound channel");
3990 if !self.context.is_usable() {
3991 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3993 if !self.context.is_live() {
3994 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3997 // Before proposing a feerate update, check that we can actually afford the new fee.
3998 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3999 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4000 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4001 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4002 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4003 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4004 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4005 //TODO: auto-close after a number of failures?
4006 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4010 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust thresholds at the proposed `feerate_per_kw`.
4011 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4012 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4013 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4014 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4015 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4018 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4019 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4023 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4024 force_holding_cell = true;
4027 if force_holding_cell {
4028 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4032 debug_assert!(self.context.pending_update_fee.is_none());
4033 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4035 Some(msgs::UpdateFee {
4036 channel_id: self.context.channel_id,
4041 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4042 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be resent.
4044 /// No further message handling calls may be made until a channel_reestablish dance has completed.
4046 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
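///
/// A hypothetical disconnection-handling sketch (all names other than this method are
/// assumed):
///
/// ```ignore
/// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
///     // Per the note above, an Err(()) means the channel should be force-shut-down
///     // immediately.
/// }
/// ```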
4047 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4048 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4049 if self.context.channel_state.is_pre_funded_state() {
4053 if self.context.channel_state.is_peer_disconnected() {
4054 // While the below code should be idempotent, it's simpler to just return early, as
4055 // redundant disconnect events can fire, though they should be rare.
4059 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4060 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4063 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4064 // will be retransmitted.
4065 self.context.last_sent_closing_fee = None;
4066 self.context.pending_counterparty_closing_signed = None;
4067 self.context.closing_fee_limits = None;
4069 let mut inbound_drop_count = 0;
4070 self.context.pending_inbound_htlcs.retain(|htlc| {
4072 InboundHTLCState::RemoteAnnounced(_) => {
4073 // They sent us an update_add_htlc but we never got the commitment_signed.
4074 // We'll tell them what commitment_signed we're expecting next and they'll drop
4075 // this HTLC accordingly
4076 inbound_drop_count += 1;
4079 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4080 // We received a commitment_signed updating this HTLC and (at least hopefully)
4081 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4082 // in response to it yet, so don't touch it.
4085 InboundHTLCState::Committed => true,
4086 InboundHTLCState::LocalRemoved(_) => {
4087 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4088 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4089 // (that we missed). Keep this around for now and if they tell us they missed
4090 // the commitment_signed we can re-transmit the update then.
4095 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4097 if let Some((_, update_state)) = self.context.pending_update_fee {
4098 if update_state == FeeUpdateState::RemoteAnnounced {
4099 debug_assert!(!self.context.is_outbound());
4100 self.context.pending_update_fee = None;
4104 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4105 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4106 // They sent us an update to remove this but haven't yet sent the corresponding
4107 // commitment_signed; we need to move it back to Committed and they can re-send
4108 // the update upon reconnection.
4109 htlc.state = OutboundHTLCState::Committed;
4113 self.context.sent_message_awaiting_response = None;
4115 self.context.channel_state.set_peer_disconnected();
4116 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4120 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4121 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4122 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4123 /// update completes (potentially immediately).
4124 /// The messages which were generated with the monitor update must *not* have been sent to the
4125 /// remote end, and must instead have been dropped. They will be regenerated when
4126 /// [`Self::monitor_updating_restored`] is called.
4128 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4129 /// [`chain::Watch`]: crate::chain::Watch
4130 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
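///
/// A usage sketch mirroring how this is invoked elsewhere in this file when a monitor update
/// is about to be handed back without any pending HTLC resolutions:
///
/// ```ignore
/// self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
/// let monitor_update_opt = self.push_ret_blockable_mon_update(monitor_update);
/// ```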
4131 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4132 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4133 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4134 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4136 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4137 self.context.monitor_pending_commitment_signed |= resend_commitment;
4138 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4139 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4140 self.context.monitor_pending_failures.append(&mut pending_fails);
4141 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4142 self.context.channel_state.set_monitor_update_in_progress();
4145 /// Indicates that the latest ChannelMonitor update has been committed by the client
4146 /// successfully and we should restore normal operation. Returns messages which should be sent
4147 /// to the remote side.
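///
/// A non-compiled sketch of the caller side (the surrounding bindings are assumed):
///
/// ```ignore
/// let updates = channel.monitor_updating_restored(&logger, &node_signer, chain_hash,
///     &user_config, best_block_height);
/// // `updates.order` dictates whether `updates.raa` or `updates.commitment_update` goes to
/// // the peer first; the HTLC vectors are handed back up for forwarding/failing/claiming.
/// ```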
4148 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4149 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4150 user_config: &UserConfig, best_block_height: u32
4151 ) -> MonitorRestoreUpdates
4154 NS::Target: NodeSigner
4156 assert!(self.context.channel_state.is_monitor_update_in_progress());
4157 self.context.channel_state.clear_monitor_update_in_progress();
4159 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4160 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4161 // first received the funding_signed.
4162 let mut funding_broadcastable =
4163 if self.context.is_outbound() &&
4164 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4165 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4167 self.context.funding_transaction.take()
4169 // That said, if the funding transaction is already confirmed (i.e., we're active with a
4170 // minimum_depth over 0), don't bother re-broadcasting the confirmed funding tx.
4171 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4172 funding_broadcastable = None;
4175 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4176 // (and we assume the user never directly broadcasts the funding transaction and waits for
4177 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4178 // * an inbound channel that failed to persist the monitor on funding_created and we got
4179 // the funding transaction confirmed before the monitor was persisted, or
4180 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4181 let channel_ready = if self.context.monitor_pending_channel_ready {
4182 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4183 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4184 self.context.monitor_pending_channel_ready = false;
4185 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4186 Some(msgs::ChannelReady {
4187 channel_id: self.context.channel_id(),
4188 next_per_commitment_point,
4189 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4193 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4195 let mut accepted_htlcs = Vec::new();
4196 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4197 let mut failed_htlcs = Vec::new();
4198 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4199 let mut finalized_claimed_htlcs = Vec::new();
4200 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4202 if self.context.channel_state.is_peer_disconnected() {
4203 self.context.monitor_pending_revoke_and_ack = false;
4204 self.context.monitor_pending_commitment_signed = false;
4205 return MonitorRestoreUpdates {
4206 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4207 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4211 let raa = if self.context.monitor_pending_revoke_and_ack {
4212 Some(self.get_last_revoke_and_ack())
4214 let commitment_update = if self.context.monitor_pending_commitment_signed {
4215 self.get_last_commitment_update_for_send(logger).ok()
4217 if commitment_update.is_some() {
4218 self.mark_awaiting_response();
4221 self.context.monitor_pending_revoke_and_ack = false;
4222 self.context.monitor_pending_commitment_signed = false;
4223 let order = self.context.resend_order.clone();
4224 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4225 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4226 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4227 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4228 MonitorRestoreUpdates {
4229 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4233 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4234 where F::Target: FeeEstimator, L::Target: Logger
4236 if self.context.is_outbound() {
4237 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4239 if self.context.channel_state.is_peer_disconnected() {
4240 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4242 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4244 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4245 self.context.update_time_counter += 1;
4246 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4247 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4248 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4249 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4250 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4251 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4252 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4253 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4254 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4255 msg.feerate_per_kw, holder_tx_dust_exposure)));
4257 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4258 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4259 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4265 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
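///
/// A rough sketch for `async_signing` builds (the field names are taken from the bindings
/// used in the body below and may not be exhaustive):
///
/// ```ignore
/// let updates = channel.signer_maybe_unblocked(&logger);
/// if let Some(commitment_update) = updates.commitment_update {
///     // The signer caught up; send the regenerated commitment update to the peer now.
/// }
/// ```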
4267 #[cfg(async_signing)]
4268 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4269 let commitment_update = if self.context.signer_pending_commitment_update {
4270 self.get_last_commitment_update_for_send(logger).ok()
4272 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4273 self.context.get_funding_signed_msg(logger).1
4275 let channel_ready = if funding_signed.is_some() {
4276 self.check_get_channel_ready(0)
4279 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4280 if commitment_update.is_some() { "a" } else { "no" },
4281 if funding_signed.is_some() { "a" } else { "no" },
4282 if channel_ready.is_some() { "a" } else { "no" });
4284 SignerResumeUpdates {
4291 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4292 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4293 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4294 msgs::RevokeAndACK {
4295 channel_id: self.context.channel_id,
4296 per_commitment_secret,
4297 next_per_commitment_point,
4299 next_local_nonce: None,
4303 /// Gets the last commitment update for immediate sending to our peer.
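///
/// A usage sketch, mirroring how monitor-update restoration consumes the result in this
/// file (a signer that is not yet ready simply yields `None` via `.ok()`):
///
/// ```ignore
/// let commitment_update = if self.context.monitor_pending_commitment_signed {
///     self.get_last_commitment_update_for_send(logger).ok()
/// } else { None };
/// ```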
4304 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4305 let mut update_add_htlcs = Vec::new();
4306 let mut update_fulfill_htlcs = Vec::new();
4307 let mut update_fail_htlcs = Vec::new();
4308 let mut update_fail_malformed_htlcs = Vec::new();
4310 for htlc in self.context.pending_outbound_htlcs.iter() {
4311 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4312 update_add_htlcs.push(msgs::UpdateAddHTLC {
4313 channel_id: self.context.channel_id(),
4314 htlc_id: htlc.htlc_id,
4315 amount_msat: htlc.amount_msat,
4316 payment_hash: htlc.payment_hash,
4317 cltv_expiry: htlc.cltv_expiry,
4318 onion_routing_packet: (**onion_packet).clone(),
4319 skimmed_fee_msat: htlc.skimmed_fee_msat,
4320 blinding_point: htlc.blinding_point,
4325 for htlc in self.context.pending_inbound_htlcs.iter() {
4326 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4328 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4329 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4330 channel_id: self.context.channel_id(),
4331 htlc_id: htlc.htlc_id,
4332 reason: err_packet.clone()
4335 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4336 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4337 channel_id: self.context.channel_id(),
4338 htlc_id: htlc.htlc_id,
4339 sha256_of_onion: sha256_of_onion.clone(),
4340 failure_code: failure_code.clone(),
4343 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4344 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4345 channel_id: self.context.channel_id(),
4346 htlc_id: htlc.htlc_id,
4347 payment_preimage: payment_preimage.clone(),
4354 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4355 Some(msgs::UpdateFee {
4356 channel_id: self.context.channel_id(),
4357 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4361 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4362 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4363 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4364 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4365 if self.context.signer_pending_commitment_update {
4366 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4367 self.context.signer_pending_commitment_update = false;
4371 #[cfg(not(async_signing))] {
4372 panic!("Failed to get signature for new commitment state");
4374 #[cfg(async_signing)] {
4375 if !self.context.signer_pending_commitment_update {
4376 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4377 self.context.signer_pending_commitment_update = true;
4382 Ok(msgs::CommitmentUpdate {
4383 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4388 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
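///
/// An illustrative reconnection snippet (caller-side plumbing assumed):
///
/// ```ignore
/// if let Some(shutdown_msg) = channel.get_outbound_shutdown() {
///     // We had already sent `shutdown` before the disconnect; re-send it on reconnect.
/// }
/// ```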
4389 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4390 if self.context.channel_state.is_local_shutdown_sent() {
4391 assert!(self.context.shutdown_scriptpubkey.is_some());
4392 Some(msgs::Shutdown {
4393 channel_id: self.context.channel_id,
4394 scriptpubkey: self.get_closing_scriptpubkey(),
4399 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4400 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4402 /// Some links printed in log lines are included here to check them during build (when run with
4403 /// `cargo doc --document-private-items`):
4404 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4405 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
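///
/// A loose sketch of the reconnect flow (bindings other than this method are assumed):
///
/// ```ignore
/// let responses = channel.channel_reestablish(&msg, &logger, &node_signer, chain_hash,
///     &user_config, &best_block)?;
/// // Send `responses.channel_ready` and `responses.shutdown_msg` if present, then the RAA
/// // and commitment update in whichever order `responses.order` specifies.
/// ```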
4406 pub fn channel_reestablish<L: Deref, NS: Deref>(
4407 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4408 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4409 ) -> Result<ReestablishResponses, ChannelError>
4412 NS::Target: NodeSigner
4414 if !self.context.channel_state.is_peer_disconnected() {
4415 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4416 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4417 // just close here instead of trying to recover.
4418 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4421 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4422 msg.next_local_commitment_number == 0 {
4423 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4426 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4427 if msg.next_remote_commitment_number > 0 {
4428 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4429 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4430 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4431 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4432 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4434 if msg.next_remote_commitment_number > our_commitment_transaction {
4435 macro_rules! log_and_panic {
4436 ($err_msg: expr) => {
4437 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4438 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4441 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4442 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4443 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4444 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4445 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4446 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4447 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4448 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4452 // Before we change the state of the channel, we check if the peer is sending a very old
4453 // commitment transaction number; if so, we send a warning message.
4454 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4455 return Err(ChannelError::Warn(format!(
4456 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4457 msg.next_remote_commitment_number,
4458 our_commitment_transaction
4462 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4463 // remaining cases either succeed or ErrorMessage-fail).
4464 self.context.channel_state.clear_peer_disconnected();
4465 self.context.sent_message_awaiting_response = None;
4467 let shutdown_msg = self.get_outbound_shutdown();
4469 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4471 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4472 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4473 if !self.context.channel_state.is_our_channel_ready() ||
4474 self.context.channel_state.is_monitor_update_in_progress() {
4475 if msg.next_remote_commitment_number != 0 {
4476 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4478 // Short circuit the whole handler as there is nothing we can resend them
4479 return Ok(ReestablishResponses {
4480 channel_ready: None,
4481 raa: None, commitment_update: None,
4482 order: RAACommitmentOrder::CommitmentFirst,
4483 shutdown_msg, announcement_sigs,
4487 // We have OurChannelReady set!
4488 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4489 return Ok(ReestablishResponses {
4490 channel_ready: Some(msgs::ChannelReady {
4491 channel_id: self.context.channel_id(),
4492 next_per_commitment_point,
4493 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4495 raa: None, commitment_update: None,
4496 order: RAACommitmentOrder::CommitmentFirst,
4497 shutdown_msg, announcement_sigs,
4501 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4502 // Remote isn't waiting on any RevokeAndACK from us!
4503 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4505 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4506 if self.context.channel_state.is_monitor_update_in_progress() {
4507 self.context.monitor_pending_revoke_and_ack = true;
4510 Some(self.get_last_revoke_and_ack())
4513 debug_assert!(false, "All values should have been handled in the four cases above");
4514 return Err(ChannelError::Close(format!(
4515 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4516 msg.next_remote_commitment_number,
4517 our_commitment_transaction
4521 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4522 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4523 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4524 // the corresponding revoke_and_ack back yet.
4525 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4526 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4527 self.mark_awaiting_response();
4529 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4531 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4532 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4533 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4534 Some(msgs::ChannelReady {
4535 channel_id: self.context.channel_id(),
4536 next_per_commitment_point,
4537 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4541 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4542 if required_revoke.is_some() {
4543 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4545 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4548 Ok(ReestablishResponses {
4549 channel_ready, shutdown_msg, announcement_sigs,
4550 raa: required_revoke,
4551 commitment_update: None,
4552 order: self.context.resend_order.clone(),
4554 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4555 if required_revoke.is_some() {
4556 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4558 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4561 if self.context.channel_state.is_monitor_update_in_progress() {
4562 self.context.monitor_pending_commitment_signed = true;
4563 Ok(ReestablishResponses {
4564 channel_ready, shutdown_msg, announcement_sigs,
4565 commitment_update: None, raa: None,
4566 order: self.context.resend_order.clone(),
4569 Ok(ReestablishResponses {
4570 channel_ready, shutdown_msg, announcement_sigs,
4571 raa: required_revoke,
4572 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4573 order: self.context.resend_order.clone(),
4576 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4577 Err(ChannelError::Close(format!(
4578 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4579 msg.next_local_commitment_number,
4580 next_counterparty_commitment_number,
4583 Err(ChannelError::Close(format!(
4584 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4585 msg.next_local_commitment_number,
4586 next_counterparty_commitment_number,
4591 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4592 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4593 /// at which point they will be recalculated.
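///
/// A usage sketch mirroring the call made when proposing the initial `closing_signed` below:
///
/// ```ignore
/// let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
/// ```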
4594 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4596 where F::Target: FeeEstimator
4598 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4600 // Propose a range from our current ChannelCloseMinimum feerate to our NonAnchorChannelFee
4601 // feerate plus our force_close_avoidance_max_fee_satoshis.
4602 // If we fail to come to consensus, we'll have to force-close.
4603 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4604 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4605 // that we don't expect to need fee bumping
4606 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4607 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4609 // The spec requires that (when the channel does not have anchors) we only send absolute
4610 // channel fees no greater than the absolute channel fee on the current commitment
4611 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4612 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4613 // some force-closure by old nodes, but we wanted to close the channel anyway.
4615 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4616 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4617 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4618 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4621 // Note that technically we could end up with a lower minimum fee if one side's balance is
4622 // below our dust limit, causing the output to disappear. We don't bother handling this
4623 // case, however, as this should only happen if a channel is closed before any (material)
4624 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4625 // come to consensus with our counterparty on appropriate fees, however it should be a
4626 // relatively rare case. We can revisit this later, though note that in order to determine
4627 // if the funder's output is dust we have to know the absolute fee we're going to use.
4628 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4629 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4630 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4631 // We always add force_close_avoidance_max_fee_satoshis to our normal
4632 // feerate-calculated fee, but allow the max to be overridden if we're using a
4633 // target feerate-calculated fee.
4634 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4635 proposed_max_feerate as u64 * tx_weight / 1000)
4637 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4640 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4641 self.context.closing_fee_limits.clone().unwrap()
4644 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4645 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4646 /// this point if we're the funder we should send the initial closing_signed, and in any case
4647 /// shutdown should complete within a reasonable timeframe.
4648 fn closing_negotiation_ready(&self) -> bool {
4649 self.context.closing_negotiation_ready()
4652 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4653 /// an Err if no progress is being made and the channel should be force-closed instead.
4654 /// Should be called on a one-minute timer.
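///
/// A hypothetical timer-handling sketch (caller-side names are assumed):
///
/// ```ignore
/// // Once per one-minute timer tick:
/// if let Err(e) = channel.timer_check_closing_negotiation_progress() {
///     // Negotiation stalled for two ticks; force-close the channel, citing `e`.
/// }
/// ```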
4655 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4656 if self.closing_negotiation_ready() {
4657 if self.context.closing_signed_in_flight {
4658 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4660 self.context.closing_signed_in_flight = true;
4666 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4667 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4668 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4669 where F::Target: FeeEstimator, L::Target: Logger
4671 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4672 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4673 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4674 // that closing_negotiation_ready checks this case (as well as a few others).
4675 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4676 return Ok((None, None, None));
4679 if !self.context.is_outbound() {
4680 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4681 return self.closing_signed(fee_estimator, &msg);
4683 return Ok((None, None, None));
4686 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4687 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4688 if self.context.expecting_peer_commitment_signed {
4689 return Ok((None, None, None));
4692 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4694 assert!(self.context.shutdown_scriptpubkey.is_some());
4695 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4696 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4697 our_min_fee, our_max_fee, total_fee_satoshis);
4699 match &self.context.holder_signer {
4700 ChannelSignerType::Ecdsa(ecdsa) => {
4702 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4703 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4705 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4706 Ok((Some(msgs::ClosingSigned {
4707 channel_id: self.context.channel_id,
4708 fee_satoshis: total_fee_satoshis,
4710 fee_range: Some(msgs::ClosingSignedFeeRange {
4711 min_fee_satoshis: our_min_fee,
4712 max_fee_satoshis: our_max_fee,
4716 // TODO (taproot|arik)
4722 // Marks a channel as waiting for a response from the counterparty. If it's not received
4723 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt to disconnect them.
4725 fn mark_awaiting_response(&mut self) {
4726 self.context.sent_message_awaiting_response = Some(0);
4729 /// Determines whether we should disconnect the counterparty due to not receiving a response
4730 /// within our expected timeframe.
4732 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
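///
/// A hypothetical per-tick check (caller-side names are assumed):
///
/// ```ignore
/// if channel.should_disconnect_peer_awaiting_response() {
///     // The peer has been unresponsive for too long; disconnect it so that a fresh
///     // channel_reestablish can get both sides back in sync.
/// }
/// ```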
4733 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4734 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4737 // Don't disconnect when we're not waiting on a response.
4740 *ticks_elapsed += 1;
4741 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4745 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4746 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4748 if self.context.channel_state.is_peer_disconnected() {
4749 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4751 if self.context.channel_state.is_pre_funded_state() {
4752 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4753 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4754 // can do that via error message without getting a connection fail anyway...
4755 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4757 for htlc in self.context.pending_inbound_htlcs.iter() {
4758 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4759 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4762 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4764 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4765 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4768 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4769 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4770 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4773 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4776 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4777 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4778 // any further commitment updates after we set LocalShutdownSent.
4779 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4781 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4784 assert!(send_shutdown);
4785 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4786 Ok(scriptpubkey) => scriptpubkey,
4787 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4789 if !shutdown_scriptpubkey.is_compatible(their_features) {
4790 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4792 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4797 // From here on out, we may not fail!
4799 self.context.channel_state.set_remote_shutdown_sent();
4800 self.context.update_time_counter += 1;
4802 let monitor_update = if update_shutdown_script {
4803 self.context.latest_monitor_update_id += 1;
4804 let monitor_update = ChannelMonitorUpdate {
4805 update_id: self.context.latest_monitor_update_id,
4806 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4807 scriptpubkey: self.get_closing_scriptpubkey(),
4810 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4811 self.push_ret_blockable_mon_update(monitor_update)
4813 let shutdown = if send_shutdown {
4814 Some(msgs::Shutdown {
4815 channel_id: self.context.channel_id,
4816 scriptpubkey: self.get_closing_scriptpubkey(),
4820 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4821 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4822 // cell HTLCs and return them to fail the payment.
4823 self.context.holding_cell_update_fee = None;
4824 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4825 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4827 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4828 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4835 self.context.channel_state.set_local_shutdown_sent();
4836 self.context.update_time_counter += 1;
4838 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4841 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4842 let mut tx = closing_tx.trust().built_transaction().clone();
4844 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4846 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4847 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4848 let mut holder_sig = sig.serialize_der().to_vec();
4849 holder_sig.push(EcdsaSighashType::All as u8);
4850 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4851 cp_sig.push(EcdsaSighashType::All as u8);
4852 if funding_key[..] < counterparty_funding_key[..] {
4853 tx.input[0].witness.push(holder_sig);
4854 tx.input[0].witness.push(cp_sig);
4856 tx.input[0].witness.push(cp_sig);
4857 tx.input[0].witness.push(holder_sig);
4860 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
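// A minimal standalone sketch (names here are illustrative, not part of this impl) of the
// witness ordering applied above: per BOLT 3 the two signatures in the co-op close witness
// are ordered by lexicographic comparison of the serialized funding pubkeys, after an empty
// element that absorbs OP_CHECKMULTISIG's extra stack pop.
fn order_closing_witness(
	holder_funding_key: &[u8; 33], counterparty_funding_key: &[u8; 33],
	holder_sig: Vec<u8>, counterparty_sig: Vec<u8>, funding_redeemscript: Vec<u8>,
) -> Vec<Vec<u8>> {
	let mut witness = vec![Vec::new()]; // dummy element for the CHECKMULTISIG off-by-one
	if holder_funding_key[..] < counterparty_funding_key[..] {
		witness.push(holder_sig);
		witness.push(counterparty_sig);
	} else {
		witness.push(counterparty_sig);
		witness.push(holder_sig);
	}
	witness.push(funding_redeemscript); // the redeemscript is always the final element
	witness
}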
4864 pub fn closing_signed<F: Deref>(
4865 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4866 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4867 where F::Target: FeeEstimator
4869 if !self.context.channel_state.is_both_sides_shutdown() {
4870 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4872 if self.context.channel_state.is_peer_disconnected() {
4873 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4875 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4876 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4878 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4879 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4882 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4883 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4886 if self.context.channel_state.is_monitor_update_in_progress() {
4887 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4888 return Ok((None, None, None));
4891 let funding_redeemscript = self.context.get_funding_redeemscript();
4892 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4893 if used_total_fee != msg.fee_satoshis {
4894 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4896 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4898 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4901 // The remote end may have decided to revoke their output due to inconsistent dust
4902 // limits, so check for that case by re-checking the signature here.
4903 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4904 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4905 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4909 for outp in closing_tx.trust().built_transaction().output.iter() {
4910 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4911 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4915 assert!(self.context.shutdown_scriptpubkey.is_some());
4916 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4917 if last_fee == msg.fee_satoshis {
4918 let shutdown_result = ShutdownResult {
4919 monitor_update: None,
4920 dropped_outbound_htlcs: Vec::new(),
4921 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4922 channel_id: self.context.channel_id,
4923 counterparty_node_id: self.context.counterparty_node_id,
4925 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4926 self.context.channel_state = ChannelState::ShutdownComplete;
4927 self.context.update_time_counter += 1;
4928 return Ok((None, Some(tx), Some(shutdown_result)));
4932 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4934 macro_rules! propose_fee {
4935 ($new_fee: expr) => {
4936 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4937 (closing_tx, $new_fee)
4939 self.build_closing_transaction($new_fee, false)
4942 return match &self.context.holder_signer {
4943 ChannelSignerType::Ecdsa(ecdsa) => {
4945 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4946 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4947 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4948 let shutdown_result = ShutdownResult {
4949 monitor_update: None,
4950 dropped_outbound_htlcs: Vec::new(),
4951 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4952 channel_id: self.context.channel_id,
4953 counterparty_node_id: self.context.counterparty_node_id,
4955 self.context.channel_state = ChannelState::ShutdownComplete;
4956 self.context.update_time_counter += 1;
4957 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4958 (Some(tx), Some(shutdown_result))
4963 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4964 Ok((Some(msgs::ClosingSigned {
4965 channel_id: self.context.channel_id,
4966 fee_satoshis: used_fee,
4968 fee_range: Some(msgs::ClosingSignedFeeRange {
4969 min_fee_satoshis: our_min_fee,
4970 max_fee_satoshis: our_max_fee,
4972 }), signed_tx, shutdown_result))
4974 // TODO (taproot|arik)
4981 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4982 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4983 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4985 if max_fee_satoshis < our_min_fee {
4986 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4988 if min_fee_satoshis > our_max_fee {
4989 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4992 if !self.context.is_outbound() {
4993 // They have to pay, so pick the highest fee in the overlapping range.
4994 // We should never set an upper bound aside from their full balance
4995 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4996 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4998 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4999 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5000 msg.fee_satoshis, our_min_fee, our_max_fee)));
5002 // The proposed fee is in our acceptable range, accept it and broadcast!
5003 propose_fee!(msg.fee_satoshis);
5006 // Old fee style negotiation. We don't bother to enforce whether they are complying
5007 // with the "making progress" requirements; we just comply and hope for the best.
5008 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5009 if msg.fee_satoshis > last_fee {
5010 if msg.fee_satoshis < our_max_fee {
5011 propose_fee!(msg.fee_satoshis);
5012 } else if last_fee < our_max_fee {
5013 propose_fee!(our_max_fee);
5015 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5018 if msg.fee_satoshis > our_min_fee {
5019 propose_fee!(msg.fee_satoshis);
5020 } else if last_fee > our_min_fee {
5021 propose_fee!(our_min_fee);
5023 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5027 if msg.fee_satoshis < our_min_fee {
5028 propose_fee!(our_min_fee);
5029 } else if msg.fee_satoshis > our_max_fee {
5030 propose_fee!(our_max_fee);
5032 propose_fee!(msg.fee_satoshis);
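// A simplified standalone sketch (illustrative names, not this impl's API) of the modern
// fee-range negotiation above: the funder pays the closing fee, so the non-funder proposes
// the highest fee both ranges allow, while the funder clamps the peer's proposal into its
// own acceptable range. Also shown is the ceiling msat -> sat conversion used in the
// debug_assert above when computing our maximum fee.
fn pick_closing_fee_sats(
	we_are_funder: bool, their_proposed: u64,
	our_min: u64, our_max: u64, their_min: u64, their_max: u64,
) -> Option<u64> {
	// Non-overlapping ranges mean no consensus is possible without a warning/retry.
	if their_max < our_min || their_min > our_max { return None; }
	if we_are_funder {
		// We pay: accept their proposal if it is in our range, otherwise counter with the
		// nearest bound of our range.
		Some(their_proposed.clamp(our_min, our_max))
	} else {
		// They pay: push toward the largest fee acceptable to both sides.
		Some(core::cmp::min(our_max, their_max))
	}
}

// Ceiling conversion from millisatoshis to whole satoshis, as in the debug_assert above.
fn msat_to_sat_ceil(msat: u64) -> u64 { (msat + 999) / 1000 }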
5038 fn internal_htlc_satisfies_config(
5039 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5040 ) -> Result<(), (&'static str, u16)> {
5041 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5042 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5043 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5044 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5046 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5047 0x1000 | 12, // fee_insufficient
5050 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5052 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5053 0x1000 | 13, // incorrect_cltv_expiry
5059 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5060 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5061 /// unsuccessful, falls back to the previous one if one exists.
5062 pub fn htlc_satisfies_config(
5063 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5064 ) -> Result<(), (&'static str, u16)> {
5065 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5067 if let Some(prev_config) = self.context.prev_config() {
5068 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
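// A standalone worked sketch (illustrative name) of the config checks above: the required
// forwarding fee is `base + amount * proportional_millionths / 1_000_000`, computed with
// checked arithmetic, and the inbound HTLC must expire at least `cltv_expiry_delta` blocks
// after the outbound one.
fn forward_satisfies_config_sketch(
	inbound_amount_msat: u64, inbound_cltv_expiry: u32,
	amt_to_forward_msat: u64, outgoing_cltv_value: u32,
	fee_base_msat: u64, fee_proportional_millionths: u64, cltv_expiry_delta: u16,
) -> bool {
	let fee = amt_to_forward_msat
		.checked_mul(fee_proportional_millionths)
		.map(|prop| prop / 1_000_000)
		.and_then(|prop| prop.checked_add(fee_base_msat));
	let fee_ok = match fee {
		// The inbound amount must cover both the fee and the amount we forward onward.
		Some(fee) => inbound_amount_msat >= fee && inbound_amount_msat - fee >= amt_to_forward_msat,
		None => false,
	};
	let cltv_ok = inbound_cltv_expiry as u64 >= outgoing_cltv_value as u64 + cltv_expiry_delta as u64;
	fee_ok && cltv_ok
}
// e.g. forwarding 100_000 msat over a hop charging 1_000 msat base + 100 ppm requires an
// inbound amount of at least 101_010 msat.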
5075 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5076 self.context.cur_holder_commitment_transaction_number + 1
5079 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5080 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5083 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5084 self.context.cur_counterparty_commitment_transaction_number + 2
5088 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5089 &self.context.holder_signer
5093 pub fn get_value_stat(&self) -> ChannelValueStat {
5095 value_to_self_msat: self.context.value_to_self_msat,
5096 channel_value_msat: self.context.channel_value_satoshis * 1000,
5097 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5098 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5099 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5100 holding_cell_outbound_amount_msat: {
5102 for h in self.context.holding_cell_htlc_updates.iter() {
5104 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5112 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5113 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5117 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5118 /// Allowed in any state (including after shutdown)
5119 pub fn is_awaiting_monitor_update(&self) -> bool {
5120 self.context.channel_state.is_monitor_update_in_progress()
5123 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5124 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5125 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5126 self.context.blocked_monitor_updates[0].update.update_id - 1
5129 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5130 /// further blocked monitor update exists after the next.
5131 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5132 if self.context.blocked_monitor_updates.is_empty() { return None; }
5133 Some((self.context.blocked_monitor_updates.remove(0).update,
5134 !self.context.blocked_monitor_updates.is_empty()))
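// A standalone sketch (illustrative types, not this impl's API) of the blocked-update
// bookkeeping used by the two methods above: update ids increase monotonically, so while
// any update is blocked the latest released id is one less than the first blocked id, and
// unblocking pops from the front while reporting whether more blocked updates remain.
struct BlockedUpdateQueueSketch {
	latest_update_id: u64,
	blocked_update_ids: std::collections::VecDeque<u64>,
}

impl BlockedUpdateQueueSketch {
	fn latest_unblocked_id(&self) -> u64 {
		match self.blocked_update_ids.front() {
			Some(first_blocked) => first_blocked - 1,
			None => self.latest_update_id,
		}
	}
	fn unblock_next(&mut self) -> Option<(u64, bool)> {
		let next = self.blocked_update_ids.pop_front()?;
		Some((next, !self.blocked_update_ids.is_empty()))
	}
}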
5137 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5138 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5139 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5140 -> Option<ChannelMonitorUpdate> {
5141 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5142 if !release_monitor {
5143 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5152 pub fn blocked_monitor_updates_pending(&self) -> usize {
5153 self.context.blocked_monitor_updates.len()
5156 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5157 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5158 /// transaction. If the channel is inbound, this implies simply that the channel has not
5159 /// advanced state.
5160 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5161 if !self.is_awaiting_monitor_update() { return false; }
5163 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5164 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5166 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5167 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5168 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5171 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5172 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5173 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5174 // waiting for the initial monitor persistence. Thus, we check if our commitment
5175 // transaction numbers have both been iterated only exactly once (for the
5176 // funding_signed), and we're awaiting monitor update.
5178 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5179 // only way to get an awaiting-monitor-update state during initial funding is if the
5180 // initial monitor persistence is still pending).
5182 // Because deciding we're awaiting initial broadcast spuriously could result in
5183 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5184 // we hard-assert here, even in production builds.
5185 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5186 assert!(self.context.monitor_pending_channel_ready);
5187 assert_eq!(self.context.latest_monitor_update_id, 0);
5193 /// Returns true if our channel_ready has been sent
5194 pub fn is_our_channel_ready(&self) -> bool {
5195 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5196 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5199 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5200 pub fn received_shutdown(&self) -> bool {
5201 self.context.channel_state.is_remote_shutdown_sent()
5204 /// Returns true if we either initiated or agreed to shut down the channel.
5205 pub fn sent_shutdown(&self) -> bool {
5206 self.context.channel_state.is_local_shutdown_sent()
5209 /// Returns true if this channel is fully shut down. True here implies that no further actions
5210 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5211 /// will be handled appropriately by the chain monitor.
5212 pub fn is_shutdown(&self) -> bool {
5213 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5216 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5217 self.context.channel_update_status
5220 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5221 self.context.update_time_counter += 1;
5222 self.context.channel_update_status = status;
5225 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5227 // * always when a new block/transactions are confirmed with the new height
5228 // * when funding is signed with a height of 0
5229 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5233 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5234 if funding_tx_confirmations <= 0 {
5235 self.context.funding_tx_confirmation_height = 0;
5238 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5242 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5243 // channel_ready yet.
5244 if self.context.signer_pending_funding {
5248 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5249 // channel_ready until the entire batch is ready.
5250 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5251 self.context.channel_state.set_our_channel_ready();
5253 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5254 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5255 self.context.update_time_counter += 1;
5257 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5258 // We got a reorg but not enough to trigger a force close, just ignore.
5261 if self.context.funding_tx_confirmation_height != 0 &&
5262 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5264 // We should never see a funding transaction on-chain until we've received
5265 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5266 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5267 // however, may do this and we shouldn't treat it as a bug.
5268 #[cfg(not(fuzzing))]
5269 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5270 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5271 self.context.channel_state.to_u32());
5273 // We got a reorg but not enough to trigger a force close, just ignore.
5277 if need_commitment_update {
5278 if !self.context.channel_state.is_monitor_update_in_progress() {
5279 if !self.context.channel_state.is_peer_disconnected() {
5280 let next_per_commitment_point =
5281 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5282 return Some(msgs::ChannelReady {
5283 channel_id: self.context.channel_id,
5284 next_per_commitment_point,
5285 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5289 self.context.monitor_pending_channel_ready = true;
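// A standalone arithmetic sketch (illustrative name) of the confirmation count used in this
// function: a funding transaction confirmed at `conf_height` has
// `tip_height - conf_height + 1` confirmations at chain tip `tip_height` (the confirming
// block itself counts as one). The 0-conf path above, where the negotiated minimum depth is
// zero, is intentionally not modelled here.
fn funding_has_min_depth(tip_height: u32, conf_height: u32, minimum_depth: u32) -> bool {
	if conf_height == 0 || tip_height < conf_height { return false; }
	let confirmations = (tip_height - conf_height) as u64 + 1;
	confirmations >= minimum_depth as u64
}
// e.g. a funding tx confirmed at height 800_000 reaches a minimum depth of 3 once the
// chain tip is at height 800_002.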
5295 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5296 /// In the first case, we store the confirmation height and calculate the short channel id.
5297 /// In the second, we simply return an Err indicating we need to be force-closed now.
5298 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5299 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5300 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5301 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5303 NS::Target: NodeSigner,
5306 let mut msgs = (None, None);
5307 if let Some(funding_txo) = self.context.get_funding_txo() {
5308 for &(index_in_block, tx) in txdata.iter() {
5309 // Check if the transaction is the expected funding transaction, and if it is,
5310 // check that it pays the right amount to the right script.
5311 if self.context.funding_tx_confirmation_height == 0 {
5312 if tx.txid() == funding_txo.txid {
5313 let txo_idx = funding_txo.index as usize;
5314 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5315 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5316 if self.context.is_outbound() {
5317 // If we generated the funding transaction and it doesn't match what it
5318 // should, the client is really broken and we should just panic and
5319 // tell them off. That said, because hash collisions happen with high
5320 // probability in fuzzing mode, if we're fuzzing we just close the
5321 // channel and move on.
5322 #[cfg(not(fuzzing))]
5323 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5325 self.context.update_time_counter += 1;
5326 let err_reason = "funding tx had wrong script/value or output index";
5327 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5329 if self.context.is_outbound() {
5330 if !tx.is_coin_base() {
5331 for input in tx.input.iter() {
5332 if input.witness.is_empty() {
5333 // We generated a malleable funding transaction, implying we've
5334 // just exposed ourselves to funds loss to our counterparty.
5335 #[cfg(not(fuzzing))]
5336 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5341 self.context.funding_tx_confirmation_height = height;
5342 self.context.funding_tx_confirmed_in = Some(*block_hash);
5343 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5344 Ok(scid) => Some(scid),
5345 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5348 // If this is a coinbase transaction and not a 0-conf channel
5349 // we should update our min_depth to 100 to handle coinbase maturity
5350 if tx.is_coin_base() &&
5351 self.context.minimum_depth.unwrap_or(0) > 0 &&
5352 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5353 self.context.minimum_depth = Some(COINBASE_MATURITY);
5356 // If we allow 1-conf funding, we may need to check for channel_ready here and
5357 // send it immediately instead of waiting for a best_block_updated call (which
5358 // may have already happened for this block).
5359 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5360 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5361 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5362 msgs = (Some(channel_ready), announcement_sigs);
5365 for inp in tx.input.iter() {
5366 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5367 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5368 return Err(ClosureReason::CommitmentTxConfirmed);
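// A standalone sketch of the short channel id packing performed by `scid_from_parts`
// above: per BOLT 7, the id packs the funding confirmation height into the top 24 bits,
// the transaction's index within that block into the next 24 bits, and the funding output
// index into the low 16 bits. The helper name here is illustrative.
fn pack_short_channel_id(block_height: u32, tx_index: u32, vout: u16) -> Option<u64> {
	// Height and tx index must each fit in 24 bits (hence the "16 million" limits in the
	// panic message above); the output index is bounded to 16 bits by its type.
	if block_height >= (1 << 24) || tx_index >= (1 << 24) { return None; }
	Some(((block_height as u64) << 40) | ((tx_index as u64) << 16) | vout as u64)
}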
5376 /// When a new block is connected, we check the height of the block against outbound holding
5377 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5378 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5379 /// handled by the ChannelMonitor.
5381 /// If we return Err, the channel may have been closed, at which point the standard
5382 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5385 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5387 pub fn best_block_updated<NS: Deref, L: Deref>(
5388 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5389 node_signer: &NS, user_config: &UserConfig, logger: &L
5390 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5392 NS::Target: NodeSigner,
5395 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5398 fn do_best_block_updated<NS: Deref, L: Deref>(
5399 &mut self, height: u32, highest_header_time: u32,
5400 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5401 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5403 NS::Target: NodeSigner,
5406 let mut timed_out_htlcs = Vec::new();
5407 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5408 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5410 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5411 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5413 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5414 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5415 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5423 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5425 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5426 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5427 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5429 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5430 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5433 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5434 self.context.channel_state.is_our_channel_ready() {
5435 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5436 if self.context.funding_tx_confirmation_height == 0 {
5437 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5438 // zero if it has been reorged out, however in either case, our state flags
5439 // indicate we've already sent a channel_ready
5440 funding_tx_confirmations = 0;
5443 // If we've sent channel_ready (or have both sent and received channel_ready), and
5444 // the funding transaction has become unconfirmed,
5445 // close the channel and hope we can get the latest state on chain (because presumably
5446 // the funding transaction is at least still in the mempool of most nodes).
5448 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5449 // 0-conf channel, but not doing so may lead to the
5450 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5452 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5453 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5454 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5455 return Err(ClosureReason::ProcessingError { err: err_reason });
5457 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5458 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5459 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5460 // If funding_tx_confirmed_in is unset, the channel must not be active
5461 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5462 assert!(!self.context.channel_state.is_our_channel_ready());
5463 return Err(ClosureReason::FundingTimedOut);
5466 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5467 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5469 Ok((None, timed_out_htlcs, announcement_sigs))
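// A standalone sketch (illustrative name) of the holding-cell timeout rule applied at the
// top of this function: an HTLC still sitting in the holding cell is failed back once its
// CLTV expiry is within LATENCY_GRACE_PERIOD_BLOCKS of the current height, since the
// counterparty would almost certainly reject a forward that close to expiry anyway.
fn holding_cell_htlc_timed_out(cltv_expiry: u32, current_height: u32, grace_blocks: u32) -> bool {
	cltv_expiry <= current_height + grace_blocks
}
// e.g. with a 3-block grace period, an HTLC expiring at height 800_002 is dropped from the
// holding cell once the chain tip reaches height 799_999.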
5472 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5473 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5474 /// before the channel has reached channel_ready and we can just wait for more blocks.
5475 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5476 if self.context.funding_tx_confirmation_height != 0 {
5477 // We handle the funding disconnection by calling best_block_updated with a height one
5478 // below where our funding was connected, implying a reorg back to conf_height - 1.
5479 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5480 // We use the time field to bump the current time we set on channel updates if it's
5481 // larger. If we don't know that time has moved forward, we can just set it to the last
5482 // time we saw and it will be ignored.
5483 let best_time = self.context.update_time_counter;
5484 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5485 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5486 assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
5487 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5488 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5494 // We never learned about the funding confirmation anyway, just ignore
5499 // Methods to get unprompted messages to send to the remote end (or where we already returned
5500 // something in the handler for the message that prompted this message):
5502 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5503 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5504 /// directions). Should be used for both broadcasted announcements and in response to an
5505 /// AnnouncementSignatures message from the remote peer.
5507 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5510 /// This will only return ChannelError::Ignore upon failure.
5512 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5513 fn get_channel_announcement<NS: Deref>(
5514 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5515 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5516 if !self.context.config.announced_channel {
5517 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5519 if !self.context.is_usable() {
5520 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5523 let short_channel_id = self.context.get_short_channel_id()
5524 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5525 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5526 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5527 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5528 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5530 let msg = msgs::UnsignedChannelAnnouncement {
5531 features: channelmanager::provided_channel_features(&user_config),
5534 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5535 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5536 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5537 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5538 excess_data: Vec::new(),
5544 fn get_announcement_sigs<NS: Deref, L: Deref>(
5545 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5546 best_block_height: u32, logger: &L
5547 ) -> Option<msgs::AnnouncementSignatures>
5549 NS::Target: NodeSigner,
5552 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5556 if !self.context.is_usable() {
5560 if self.context.channel_state.is_peer_disconnected() {
5561 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5565 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5569 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5570 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5573 log_trace!(logger, "{:?}", e);
5577 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5579 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5584 match &self.context.holder_signer {
5585 ChannelSignerType::Ecdsa(ecdsa) => {
5586 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5588 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5593 let short_channel_id = match self.context.get_short_channel_id() {
5595 None => return None,
5598 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5600 Some(msgs::AnnouncementSignatures {
5601 channel_id: self.context.channel_id(),
5603 node_signature: our_node_sig,
5604 bitcoin_signature: our_bitcoin_sig,
5607 // TODO (taproot|arik)
5613 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5614 /// available.
5615 fn sign_channel_announcement<NS: Deref>(
5616 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5617 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5618 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5619 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5620 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5621 let were_node_one = announcement.node_id_1 == our_node_key;
5623 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5624 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5625 match &self.context.holder_signer {
5626 ChannelSignerType::Ecdsa(ecdsa) => {
5627 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5628 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5629 Ok(msgs::ChannelAnnouncement {
5630 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5631 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5632 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5633 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5634 contents: announcement,
5637 // TODO (taproot|arik)
5642 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5646 /// Processes an incoming announcement_signatures message, providing a fully-signed
5647 /// channel_announcement message which we can broadcast and storing our counterparty's
5648 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5649 pub fn announcement_signatures<NS: Deref>(
5650 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5651 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5652 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5653 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5655 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5657 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5658 return Err(ChannelError::Close(format!(
5659 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5660 &announcement, self.context.get_counterparty_node_id())));
5662 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5663 return Err(ChannelError::Close(format!(
5664 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5665 &announcement, self.context.counterparty_funding_pubkey())));
5668 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5669 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5670 return Err(ChannelError::Ignore(
5671 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5674 self.sign_channel_announcement(node_signer, announcement)
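// A standalone arithmetic sketch of the confirmation gate above: with the funding
// transaction confirmed at `conf_height`, the channel has `best_height - conf_height + 1`
// confirmations, so requiring `conf_height + 5 <= best_height` is exactly the "at least six
// confirmations" rule for exchanging announcement_signatures. Names are illustrative.
fn have_six_confirmations(conf_height: u32, best_height: u32) -> bool {
	conf_height != 0 && conf_height + 5 <= best_height
}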
5677 /// Gets a signed channel_announcement for this channel, if we previously received an
5678 /// announcement_signatures from our counterparty.
5679 pub fn get_signed_channel_announcement<NS: Deref>(
5680 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5681 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5682 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5685 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5687 Err(_) => return None,
5689 match self.sign_channel_announcement(node_signer, announcement) {
5690 Ok(res) => Some(res),
5695 /// May panic if called on a channel that wasn't immediately-previously
5696 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5697 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5698 assert!(self.context.channel_state.is_peer_disconnected());
5699 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5700 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5701 // current to_remote balances. However, it no longer has any use, and thus is now simply
5702 // set to a dummy (but valid, as required by the spec) public key.
5703 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5704 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5705 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5706 let mut pk = [2; 33]; pk[1] = 0xff;
5707 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5708 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5709 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5710 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5713 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5716 self.mark_awaiting_response();
5717 msgs::ChannelReestablish {
5718 channel_id: self.context.channel_id(),
5719 // The protocol has two different commitment number concepts - the "commitment
5720 // transaction number", which starts from 0 and counts up, and the "revocation key
5721 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5722 // commitment transaction numbers by the index which will be used to reveal the
5723 // revocation key for that commitment transaction, which means we have to convert them
5724 // to protocol-level commitment numbers here...
5726 // next_local_commitment_number is the next commitment_signed number we expect to
5727 // receive (indicating if they need to resend one that we missed).
5728 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5729 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5730 // receive, however we track it by the next commitment number for a remote transaction
5731 // (which is one further, as they always revoke previous commitment transaction, not
5732 // the one we send) so we have to decrement by 1. Note that if
5733 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5734 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5736 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5737 your_last_per_commitment_secret: remote_last_secret,
5738 my_current_per_commitment_point: dummy_pubkey,
5739 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5740 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5741 // txid of that interactive transaction, else we MUST NOT set it.
5742 next_funding_txid: None,
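// A standalone sketch of the index conversion described in the comments above: LDK tracks
// commitment transactions by a counter that starts at INITIAL_COMMITMENT_NUMBER (2^48 - 1)
// and counts down, while channel_reestablish carries protocol-level numbers that count up
// from zero. The constant and helper names here are illustrative.
const INITIAL_COMMITMENT_NUMBER_SKETCH: u64 = (1 << 48) - 1;

// The next commitment_signed we expect to receive, as a protocol-level commitment number.
fn next_local_commitment_number_sketch(cur_holder_commitment_tx_number: u64) -> u64 {
	INITIAL_COMMITMENT_NUMBER_SKETCH - cur_holder_commitment_tx_number
}

// The next revoke_and_ack we expect to receive: the counterparty revokes the *previous*
// commitment, hence the extra decrement relative to their next commitment transaction.
fn next_remote_commitment_number_sketch(cur_counterparty_commitment_tx_number: u64) -> u64 {
	INITIAL_COMMITMENT_NUMBER_SKETCH - cur_counterparty_commitment_tx_number - 1
}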
5747 // Send stuff to our remote peers:
5749 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5750 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5751 /// commitment update.
5753 /// `Err`s will only be [`ChannelError::Ignore`].
5754 pub fn queue_add_htlc<F: Deref, L: Deref>(
5755 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5756 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5757 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5758 ) -> Result<(), ChannelError>
5759 where F::Target: FeeEstimator, L::Target: Logger
5762 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5763 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5764 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5766 if let ChannelError::Ignore(_) = err { /* fine */ }
5767 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5772 /// Adds a pending outbound HTLC to this channel, note that you probably want
5773 /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
5775 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5776 /// the wire:
5777 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5778 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5780 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5781 /// we may not yet have sent the previous commitment update messages and will need to
5782 /// regenerate them.
5784 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5785 /// on this [`Channel`] if `force_holding_cell` is false.
5787 /// `Err`s will only be [`ChannelError::Ignore`].
5788 fn send_htlc<F: Deref, L: Deref>(
5789 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5790 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5791 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5792 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5793 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5794 where F::Target: FeeEstimator, L::Target: Logger
5796 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5797 self.context.channel_state.is_local_shutdown_sent() ||
5798 self.context.channel_state.is_remote_shutdown_sent()
5800 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5802 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5803 if amount_msat > channel_total_msat {
5804 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5807 if amount_msat == 0 {
5808 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5811 let available_balances = self.context.get_available_balances(fee_estimator);
5812 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5813 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5814 available_balances.next_outbound_htlc_minimum_msat)));
5817 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5818 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5819 available_balances.next_outbound_htlc_limit_msat)));
5822 if self.context.channel_state.is_peer_disconnected() {
5823 // Note that this should never really happen: being !is_live() on receipt of an
5824 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5825 // the user to send directly into a !is_live() channel. However, if we
5826 // disconnected during the time the previous hop was doing the commitment dance we may
5827 // end up getting here after the forwarding delay. In any case, returning an
5828 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5829 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5832 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5833 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5834 payment_hash, amount_msat,
5835 if force_holding_cell { "into holding cell" }
5836 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5837 else { "to peer" });
5839 if need_holding_cell {
5840 force_holding_cell = true;
5843 // Now update local state:
5844 if force_holding_cell {
5845 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5850 onion_routing_packet,
5857 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5858 htlc_id: self.context.next_holder_htlc_id,
5860 payment_hash: payment_hash.clone(),
5862 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5868 let res = msgs::UpdateAddHTLC {
5869 channel_id: self.context.channel_id,
5870 htlc_id: self.context.next_holder_htlc_id,
5874 onion_routing_packet,
5878 self.context.next_holder_htlc_id += 1;
5883 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5884 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5885 // We can upgrade the status of some HTLCs that are waiting on a commitment. Even if we
5886 // fail to generate this, we are still at least at a position where upgrading their status
5887 // is acceptable.
5888 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5889 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5890 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5892 if let Some(state) = new_state {
5893 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5897 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5898 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5899 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5900 // Grab the preimage, if it exists, instead of cloning
5901 let mut reason = OutboundHTLCOutcome::Success(None);
5902 mem::swap(outcome, &mut reason);
5903 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5906 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5907 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5908 debug_assert!(!self.context.is_outbound());
5909 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5910 self.context.feerate_per_kw = feerate;
5911 self.context.pending_update_fee = None;
5914 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5916 let (mut htlcs_ref, counterparty_commitment_tx) =
5917 self.build_commitment_no_state_update(logger);
5918 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5919 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5920 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5922 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5923 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5926 self.context.latest_monitor_update_id += 1;
5927 let monitor_update = ChannelMonitorUpdate {
5928 update_id: self.context.latest_monitor_update_id,
5929 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5930 commitment_txid: counterparty_commitment_txid,
5931 htlc_outputs: htlcs.clone(),
5932 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5933 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5934 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5935 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5936 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5939 self.context.channel_state.set_awaiting_remote_revoke();
5943 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5944 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5945 where L::Target: Logger
5947 let counterparty_keys = self.context.build_remote_transaction_keys();
5948 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5949 let counterparty_commitment_tx = commitment_stats.tx;
5951 #[cfg(any(test, fuzzing))]
5953 if !self.context.is_outbound() {
5954 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5955 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5956 if let Some(info) = projected_commit_tx_info {
5957 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5958 if info.total_pending_htlcs == total_pending_htlcs
5959 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5960 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5961 && info.feerate == self.context.feerate_per_kw {
5962 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5963 assert_eq!(actual_fee, info.fee);
5969 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5972 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5973 /// generation when we shouldn't change HTLC/channel state.
5974 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5975 // Get the fee tests from `build_commitment_no_state_update`
5976 #[cfg(any(test, fuzzing))]
5977 self.build_commitment_no_state_update(logger);
5979 let counterparty_keys = self.context.build_remote_transaction_keys();
5980 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5981 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5983 match &self.context.holder_signer {
5984 ChannelSignerType::Ecdsa(ecdsa) => {
5985 let (signature, htlc_signatures);
5988 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5989 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5993 let res = ecdsa.sign_counterparty_commitment(
5994 &commitment_stats.tx,
5995 commitment_stats.inbound_htlc_preimages,
5996 commitment_stats.outbound_htlc_preimages,
5997 &self.context.secp_ctx,
5998 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6000 htlc_signatures = res.1;
6002 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6003 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6004 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6005 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6007 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6008 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6009 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6010 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6011 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6012 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6016 Ok((msgs::CommitmentSigned {
6017 channel_id: self.context.channel_id,
6021 partial_signature_with_nonce: None,
6022 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6024 // TODO (taproot|arik)
6030 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6031 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6033 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6034 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
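///
/// A rough caller-side sketch (hypothetical argument names, not a doctest):
/// `chan.send_htlc_and_commit(amt_msat, payment_hash, cltv, source, onion, None, &fee_estimator, &logger)`
/// yields `Ok(Some(monitor_update))` when the HTLC was added and a new remote commitment was
/// built, or `Ok(None)` if the HTLC could not be committed immediately (e.g. it went into the
/// holding cell).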
6035 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6036 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6037 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6038 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6039 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6040 where F::Target: FeeEstimator, L::Target: Logger
6042 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6043 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6044 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6047 let monitor_update = self.build_commitment_no_status_check(logger);
6048 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6049 Ok(self.push_ret_blockable_mon_update(monitor_update))
6055 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
6056 /// happened or not.
6057 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6058 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6059 fee_base_msat: msg.contents.fee_base_msat,
6060 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6061 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6063 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6065 self.context.counterparty_forwarding_info = new_forwarding_info;
6071 /// Begins the shutdown process, getting a message for the remote peer and returning all
6072 /// holding cell HTLCs for payment failure.
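///
/// The returned [`ChannelMonitorUpdate`], if any, records the shutdown script we are committing
/// to for this channel.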
6073 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6074 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6075 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6077 for htlc in self.context.pending_outbound_htlcs.iter() {
6078 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6079 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6082 if self.context.channel_state.is_local_shutdown_sent() {
6083 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6085 else if self.context.channel_state.is_remote_shutdown_sent() {
6086 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6088 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6089 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6091 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6092 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6093 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6096 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6099 // use override shutdown script if provided
6100 let shutdown_scriptpubkey = match override_shutdown_script {
6101 Some(script) => script,
6103 // otherwise, use the shutdown scriptpubkey provided by the signer
6104 match signer_provider.get_shutdown_scriptpubkey() {
6105 Ok(scriptpubkey) => scriptpubkey,
6106 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6110 if !shutdown_scriptpubkey.is_compatible(their_features) {
6111 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6113 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6118 // From here on out, we may not fail!
6119 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6120 self.context.channel_state.set_local_shutdown_sent();
6121 self.context.update_time_counter += 1;
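// If we only just picked a shutdown script above (rather than having committed to one
// earlier), persist it to the `ChannelMonitor` via a monitor update.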
6123 let monitor_update = if update_shutdown_script {
6124 self.context.latest_monitor_update_id += 1;
6125 let monitor_update = ChannelMonitorUpdate {
6126 update_id: self.context.latest_monitor_update_id,
6127 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6128 scriptpubkey: self.get_closing_scriptpubkey(),
6131 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6132 self.push_ret_blockable_mon_update(monitor_update)
6134 let shutdown = msgs::Shutdown {
6135 channel_id: self.context.channel_id,
6136 scriptpubkey: self.get_closing_scriptpubkey(),
6139 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6140 // our shutdown until we've committed all of the pending changes.
6141 self.context.holding_cell_update_fee = None;
6142 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6143 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6145 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6146 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6153 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6154 "we can't both complete shutdown and return a monitor update");
6156 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
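/// Returns the `(HTLCSource, PaymentHash)` pairs for all outbound HTLCs we are currently
/// responsible for, covering both HTLCs waiting in the holding cell and those already pending
/// on the channel.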
6159 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6160 self.context.holding_cell_htlc_updates.iter()
6161 .flat_map(|htlc_update| {
6163 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6164 => Some((source, payment_hash)),
6168 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6172 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6173 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6174 pub context: ChannelContext<SP>,
6175 pub unfunded_context: UnfundedChannelContext,
6178 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6179 pub fn new<ES: Deref, F: Deref>(
6180 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6181 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6182 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6183 ) -> Result<OutboundV1Channel<SP>, APIError>
6184 where ES::Target: EntropySource,
6185 F::Target: FeeEstimator
6187 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6188 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6189 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6190 let pubkeys = holder_signer.pubkeys().clone();
6192 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6193 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6195 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6196 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6198 let channel_value_msat = channel_value_satoshis * 1000;
6199 if push_msat > channel_value_msat {
6200 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6202 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6203 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6205 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6206 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6207 // Protocol level safety check in place, although it should never happen because
6208 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6209 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6212 let channel_type = Self::get_initial_channel_type(&config, their_features);
6213 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6215 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6216 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6218 (ConfirmationTarget::NonAnchorChannelFee, 0)
6220 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
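// Ensure the funder's initial balance (channel value minus push_msat), less any anchor output
// value, can cover the fee of an initial commitment transaction that still leaves room for a
// minimal number of HTLCs.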
6222 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6223 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6224 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6225 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6228 let mut secp_ctx = Secp256k1::new();
6229 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6231 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6232 match signer_provider.get_shutdown_scriptpubkey() {
6233 Ok(scriptpubkey) => Some(scriptpubkey),
6234 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6238 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6239 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6240 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6244 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6245 Ok(script) => script,
6246 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6249 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6252 context: ChannelContext {
6255 config: LegacyChannelConfig {
6256 options: config.channel_config.clone(),
6257 announced_channel: config.channel_handshake_config.announced_channel,
6258 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6263 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6265 channel_id: temporary_channel_id,
6266 temporary_channel_id: Some(temporary_channel_id),
6267 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6268 announcement_sigs_state: AnnouncementSigsState::NotSent,
6270 channel_value_satoshis,
6272 latest_monitor_update_id: 0,
6274 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6275 shutdown_scriptpubkey,
6278 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6279 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6282 pending_inbound_htlcs: Vec::new(),
6283 pending_outbound_htlcs: Vec::new(),
6284 holding_cell_htlc_updates: Vec::new(),
6285 pending_update_fee: None,
6286 holding_cell_update_fee: None,
6287 next_holder_htlc_id: 0,
6288 next_counterparty_htlc_id: 0,
6289 update_time_counter: 1,
6291 resend_order: RAACommitmentOrder::CommitmentFirst,
6293 monitor_pending_channel_ready: false,
6294 monitor_pending_revoke_and_ack: false,
6295 monitor_pending_commitment_signed: false,
6296 monitor_pending_forwards: Vec::new(),
6297 monitor_pending_failures: Vec::new(),
6298 monitor_pending_finalized_fulfills: Vec::new(),
6300 signer_pending_commitment_update: false,
6301 signer_pending_funding: false,
6303 #[cfg(debug_assertions)]
6304 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6305 #[cfg(debug_assertions)]
6306 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6308 last_sent_closing_fee: None,
6309 pending_counterparty_closing_signed: None,
6310 expecting_peer_commitment_signed: false,
6311 closing_fee_limits: None,
6312 target_closing_feerate_sats_per_kw: None,
6314 funding_tx_confirmed_in: None,
6315 funding_tx_confirmation_height: 0,
6316 short_channel_id: None,
6317 channel_creation_height: current_chain_height,
6319 feerate_per_kw: commitment_feerate,
6320 counterparty_dust_limit_satoshis: 0,
6321 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6322 counterparty_max_htlc_value_in_flight_msat: 0,
6323 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6324 counterparty_selected_channel_reserve_satoshis: None, // Filled in once we receive accept_channel
6325 holder_selected_channel_reserve_satoshis,
6326 counterparty_htlc_minimum_msat: 0,
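// A configured our_htlc_minimum_msat of 0 is bumped to 1 since zero-value HTLCs are not allowed.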
6327 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6328 counterparty_max_accepted_htlcs: 0,
6329 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6330 minimum_depth: None, // Filled in once we receive accept_channel
6332 counterparty_forwarding_info: None,
6334 channel_transaction_parameters: ChannelTransactionParameters {
6335 holder_pubkeys: pubkeys,
6336 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6337 is_outbound_from_holder: true,
6338 counterparty_parameters: None,
6339 funding_outpoint: None,
6340 channel_type_features: channel_type.clone()
6342 funding_transaction: None,
6343 is_batch_funding: None,
6345 counterparty_cur_commitment_point: None,
6346 counterparty_prev_commitment_point: None,
6347 counterparty_node_id,
6349 counterparty_shutdown_scriptpubkey: None,
6351 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6353 channel_update_status: ChannelUpdateStatus::Enabled,
6354 closing_signed_in_flight: false,
6356 announcement_sigs: None,
6358 #[cfg(any(test, fuzzing))]
6359 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6360 #[cfg(any(test, fuzzing))]
6361 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6363 workaround_lnd_bug_4006: None,
6364 sent_message_awaiting_response: None,
6366 latest_inbound_scid_alias: None,
6367 outbound_scid_alias,
6369 channel_pending_event_emitted: false,
6370 channel_ready_event_emitted: false,
6372 #[cfg(any(test, fuzzing))]
6373 historical_inbound_htlc_fulfills: HashSet::new(),
6378 blocked_monitor_updates: Vec::new(),
6380 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6384 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6385 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
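// Sign the counterparty's initial commitment transaction; if the (possibly asynchronous) signer
// cannot produce a signature yet we return `None` and the caller will retry later.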
6386 let counterparty_keys = self.context.build_remote_transaction_keys();
6387 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6388 let signature = match &self.context.holder_signer {
6389 // TODO (taproot|arik): move match into calling method for Taproot
6390 ChannelSignerType::Ecdsa(ecdsa) => {
6391 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6392 .map(|(sig, _)| sig).ok()?
6394 // TODO (taproot|arik)
6399 if self.context.signer_pending_funding {
6400 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6401 self.context.signer_pending_funding = false;
6404 Some(msgs::FundingCreated {
6405 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6406 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6407 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6410 partial_signature_with_nonce: None,
6412 next_local_nonce: None,
6416 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6417 /// a funding_created message for the remote peer.
6418 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6419 /// or if called on an inbound channel.
6420 /// Note that channel_id changes during this call!
6421 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6422 /// If an Err is returned, it is a ChannelError::Close.
6423 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6424 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6425 if !self.context.is_outbound() {
6426 panic!("Tried to create outbound funding_created message on an inbound channel!");
6429 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6430 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6432 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6434 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6435 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6436 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6437 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6440 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6441 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6443 // Now that we're past error-generating stuff, update our local state:
6445 self.context.channel_state = ChannelState::FundingNegotiated;
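// The channel id switches here from the temporary id to the definitive one derived from the
// funding outpoint (as noted in the documentation above).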
6446 self.context.channel_id = funding_txo.to_channel_id();
6448 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6449 // We can skip this if it is a zero-conf channel.
6450 if funding_transaction.is_coin_base() &&
6451 self.context.minimum_depth.unwrap_or(0) > 0 &&
6452 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6453 self.context.minimum_depth = Some(COINBASE_MATURITY);
6456 self.context.funding_transaction = Some(funding_transaction);
6457 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6459 let funding_created = self.get_funding_created_msg(logger);
6460 if funding_created.is_none() {
6461 #[cfg(not(async_signing))] {
6462 panic!("Failed to get signature for new funding creation");
6464 #[cfg(async_signing)] {
6465 if !self.context.signer_pending_funding {
6466 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6467 self.context.signer_pending_funding = true;
6475 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6476 // The default channel type (i.e. the first one we try) depends on whether the channel is
6477 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6478 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6479 // with no other changes, and fall back to `only_static_remotekey`.
6480 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6481 if !config.channel_handshake_config.announced_channel &&
6482 config.channel_handshake_config.negotiate_scid_privacy &&
6483 their_features.supports_scid_privacy() {
6484 ret.set_scid_privacy_required();
6487 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6488 // set it now. If they don't understand it, we'll fall back to our default of
6489 // `only_static_remotekey`.
6490 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6491 their_features.supports_anchors_zero_fee_htlc_tx() {
6492 ret.set_anchors_zero_fee_htlc_tx_required();
6498 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6499 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6500 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6501 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6502 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6503 ) -> Result<msgs::OpenChannel, ()>
6505 F::Target: FeeEstimator
6507 if !self.context.is_outbound() ||
6509 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6510 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6515 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6516 // We've exhausted our options
6519 // We support opening a few different types of channels. Try removing our additional
6520 // features one by one until we've either arrived at our default or the counterparty has
6521 // accepted one.
6523 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6524 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6525 // checks whether the counterparty supports every feature, this would only happen if the
6526 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6527 // some reason.
6528 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6529 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6530 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6531 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6532 } else if self.context.channel_type.supports_scid_privacy() {
6533 self.context.channel_type.clear_scid_privacy();
6535 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6537 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6538 Ok(self.get_open_channel(chain_hash))
6541 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6542 if !self.context.is_outbound() {
6543 panic!("Tried to open a channel for an inbound channel?");
6545 if self.context.have_received_message() {
6546 panic!("Cannot generate an open_channel after we've moved forward");
6549 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6550 panic!("Tried to send an open_channel for a channel that has already advanced");
6553 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6554 let keys = self.context.get_holder_pubkeys();
6558 temporary_channel_id: self.context.channel_id,
6559 funding_satoshis: self.context.channel_value_satoshis,
6560 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6561 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6562 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6563 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6564 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6565 feerate_per_kw: self.context.feerate_per_kw as u32,
6566 to_self_delay: self.context.get_holder_selected_contest_delay(),
6567 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6568 funding_pubkey: keys.funding_pubkey,
6569 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6570 payment_point: keys.payment_point,
6571 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6572 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6573 first_per_commitment_point,
6574 channel_flags: if self.context.config.announced_channel {1} else {0},
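// If we are not committing to an upfront shutdown script, a zero-length script signals the
// opt-out.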
6575 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6576 Some(script) => script.clone().into_inner(),
6577 None => Builder::new().into_script(),
6579 channel_type: Some(self.context.channel_type.clone()),
6584 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6585 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6587 // Check sanity of message fields:
6588 if !self.context.is_outbound() {
6589 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6591 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6592 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6594 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6595 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6597 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6598 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6600 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6601 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6603 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6604 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6605 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
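// The channel value minus the reserve the peer requires us to keep, used to sanity-check their
// htlc_minimum_msat below.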
6607 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6608 if msg.htlc_minimum_msat >= full_channel_value_msat {
6609 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6611 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6612 if msg.to_self_delay > max_delay_acceptable {
6613 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6615 if msg.max_accepted_htlcs < 1 {
6616 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6618 if msg.max_accepted_htlcs > MAX_HTLCS {
6619 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6622 // Now check against optional parameters as set by config...
6623 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6624 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6626 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6627 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6629 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6630 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6632 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6633 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6635 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6636 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6638 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6639 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6641 if msg.minimum_depth > peer_limits.max_minimum_depth {
6642 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6645 if let Some(ty) = &msg.channel_type {
6646 if *ty != self.context.channel_type {
6647 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6649 } else if their_features.supports_channel_type() {
6650 // Assume they've accepted the channel type as they said they understand it.
6652 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6653 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6654 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6656 self.context.channel_type = channel_type.clone();
6657 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6660 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6661 match &msg.shutdown_scriptpubkey {
6662 &Some(ref script) => {
6663 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6664 if script.len() == 0 {
6667 if !script::is_bolt2_compliant(&script, their_features) {
6668 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6670 Some(script.clone())
6673 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
6675 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive a script. Use a 0-length script to opt out".to_owned()));
6680 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6681 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6682 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6683 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6684 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
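// If we are configured to trust our own 0-conf funding, accept whatever minimum_depth the peer
// requested (possibly zero); otherwise require at least one confirmation.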
6686 if peer_limits.trust_own_funding_0conf {
6687 self.context.minimum_depth = Some(msg.minimum_depth);
6689 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6692 let counterparty_pubkeys = ChannelPublicKeys {
6693 funding_pubkey: msg.funding_pubkey,
6694 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6695 payment_point: msg.payment_point,
6696 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6697 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6700 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6701 selected_contest_delay: msg.to_self_delay,
6702 pubkeys: counterparty_pubkeys,
6705 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6706 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6708 self.context.channel_state = ChannelState::NegotiatingFunding(
6709 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6711 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6716 /// Handles a funding_signed message from the remote end.
6717 /// If this call is successful, broadcast the funding transaction (and not before!)
6718 pub fn funding_signed<L: Deref>(
6719 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6720 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6724 if !self.context.is_outbound() {
6725 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6727 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6728 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6730 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6731 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6732 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6733 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6736 let funding_script = self.context.get_funding_redeemscript();
6738 let counterparty_keys = self.context.build_remote_transaction_keys();
6739 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6740 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6741 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6743 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6744 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6746 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6747 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6749 let trusted_tx = initial_commitment_tx.trust();
6750 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6751 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6752 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6753 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6754 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6758 let holder_commitment_tx = HolderCommitmentTransaction::new(
6759 initial_commitment_tx,
6762 &self.context.get_holder_pubkeys().funding_pubkey,
6763 self.context.counterparty_funding_pubkey()
6766 let validated =
6767 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6768 if validated.is_err() {
6769 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6772 let funding_redeemscript = self.context.get_funding_redeemscript();
6773 let funding_txo = self.context.get_funding_txo().unwrap();
6774 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
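// Commitment transaction numbers are obscured by a factor derived from both parties' payment
// points (see BOLT 3); the monitor uses it to map broadcast commitment transactions back to
// their commitment numbers.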
6775 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6776 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6777 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6778 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6779 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6780 shutdown_script, self.context.get_holder_selected_contest_delay(),
6781 &self.context.destination_script, (funding_txo, funding_txo_script),
6782 &self.context.channel_transaction_parameters,
6783 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6785 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6786 channel_monitor.provide_initial_counterparty_commitment_tx(
6787 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6788 self.context.cur_counterparty_commitment_transaction_number,
6789 self.context.counterparty_cur_commitment_point.unwrap(),
6790 counterparty_initial_commitment_tx.feerate_per_kw(),
6791 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6792 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6794 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
6795 if self.context.is_batch_funding() {
6796 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6798 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6800 self.context.cur_holder_commitment_transaction_number -= 1;
6801 self.context.cur_counterparty_commitment_transaction_number -= 1;
6803 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6805 let mut channel = Channel { context: self.context };
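// A 0-conf channel may already be usable at this point, in which case we can move straight
// towards generating `channel_ready`.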
6807 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6808 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6809 Ok((channel, channel_monitor))
6812 /// Indicates that the signer may have some signatures for us, so we should retry if we're
6813 /// blocked.
6814 #[cfg(async_signing)]
6815 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6816 if self.context.signer_pending_funding && self.context.is_outbound() {
6817 log_trace!(logger, "Signer unblocked a funding_created");
6818 self.get_funding_created_msg(logger)
6823 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6824 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6825 pub context: ChannelContext<SP>,
6826 pub unfunded_context: UnfundedChannelContext,
6829 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6830 /// Creates a new channel from a remote side's request for one.
6831 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6832 pub fn new<ES: Deref, F: Deref, L: Deref>(
6833 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6834 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6835 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6836 current_chain_height: u32, logger: &L, is_0conf: bool,
6837 ) -> Result<InboundV1Channel<SP>, ChannelError>
6838 where ES::Target: EntropySource,
6839 F::Target: FeeEstimator,
6842 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6843 let announced_channel = (msg.channel_flags & 1) == 1;
6845 // First check the channel type is known, failing before we do anything else if we don't
6846 // support this channel type.
6847 let channel_type = if let Some(channel_type) = &msg.channel_type {
6848 if channel_type.supports_any_optional_bits() {
6849 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6852 // We only support the channel types defined by the `ChannelManager` in
6853 // `provided_channel_type_features`. The channel type must always support
6854 // `static_remote_key`.
6855 if !channel_type.requires_static_remote_key() {
6856 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6858 // Make sure we support all of the features behind the channel type.
6859 if !channel_type.is_subset(our_supported_features) {
6860 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6862 if channel_type.requires_scid_privacy() && announced_channel {
6863 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6865 channel_type.clone()
6867 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6868 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6869 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6874 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6875 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6876 let pubkeys = holder_signer.pubkeys().clone();
6877 let counterparty_pubkeys = ChannelPublicKeys {
6878 funding_pubkey: msg.funding_pubkey,
6879 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6880 payment_point: msg.payment_point,
6881 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6882 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6885 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6886 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6889 // Check sanity of message fields:
6890 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6891 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6893 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6894 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6896 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6897 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6899 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6900 if msg.push_msat > full_channel_value_msat {
6901 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6903 if msg.dust_limit_satoshis > msg.funding_satoshis {
6904 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6906 if msg.htlc_minimum_msat >= full_channel_value_msat {
6907 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6909 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6911 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6912 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6913 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6915 if msg.max_accepted_htlcs < 1 {
6916 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6918 if msg.max_accepted_htlcs > MAX_HTLCS {
6919 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6922 // Now check against optional parameters as set by config...
6923 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6924 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6926 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6927 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6929 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6930 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6932 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6933 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6935 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6936 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6938 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6939 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6941 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6942 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6945 // Convert things into internal flags and prep our state:
6947 if config.channel_handshake_limits.force_announced_channel_preference {
6948 if config.channel_handshake_config.announced_channel != announced_channel {
6949 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6953 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6954 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6955 // Protocol level safety check in place, although it should never happen because
6956 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6957 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6959 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6960 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6962 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6963 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6964 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6966 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6967 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6970 // check if the funder's amount for the initial commitment tx is sufficient
6971 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6972 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6973 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6977 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6978 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6979 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6980 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6983 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6984 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6985 // want to push much to us), our counterparty should always have more than our reserve.
6986 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6987 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6990 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6991 match &msg.shutdown_scriptpubkey {
6992 &Some(ref script) => {
6993 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6994 if script.len() == 0 {
6997 if !script::is_bolt2_compliant(&script, their_features) {
6998 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7000 Some(script.clone())
7003 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
7005 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive a script. Use a 0-length script to opt out".to_owned()));
7010 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7011 match signer_provider.get_shutdown_scriptpubkey() {
7012 Ok(scriptpubkey) => Some(scriptpubkey),
7013 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7017 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7018 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7019 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7023 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7024 Ok(script) => script,
7025 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7028 let mut secp_ctx = Secp256k1::new();
7029 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7031 let minimum_depth = if is_0conf {
7034 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7038 context: ChannelContext {
7041 config: LegacyChannelConfig {
7042 options: config.channel_config.clone(),
7044 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7049 inbound_handshake_limits_override: None,
7051 temporary_channel_id: Some(msg.temporary_channel_id),
7052 channel_id: msg.temporary_channel_id,
7053 channel_state: ChannelState::NegotiatingFunding(
7054 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7056 announcement_sigs_state: AnnouncementSigsState::NotSent,
7059 latest_monitor_update_id: 0,
7061 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7062 shutdown_scriptpubkey,
7065 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7066 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7067 value_to_self_msat: msg.push_msat,
7069 pending_inbound_htlcs: Vec::new(),
7070 pending_outbound_htlcs: Vec::new(),
7071 holding_cell_htlc_updates: Vec::new(),
7072 pending_update_fee: None,
7073 holding_cell_update_fee: None,
7074 next_holder_htlc_id: 0,
7075 next_counterparty_htlc_id: 0,
7076 update_time_counter: 1,
7078 resend_order: RAACommitmentOrder::CommitmentFirst,
7080 monitor_pending_channel_ready: false,
7081 monitor_pending_revoke_and_ack: false,
7082 monitor_pending_commitment_signed: false,
7083 monitor_pending_forwards: Vec::new(),
7084 monitor_pending_failures: Vec::new(),
7085 monitor_pending_finalized_fulfills: Vec::new(),
7087 signer_pending_commitment_update: false,
7088 signer_pending_funding: false,
7090 #[cfg(debug_assertions)]
7091 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7092 #[cfg(debug_assertions)]
7093 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7095 last_sent_closing_fee: None,
7096 pending_counterparty_closing_signed: None,
7097 expecting_peer_commitment_signed: false,
7098 closing_fee_limits: None,
7099 target_closing_feerate_sats_per_kw: None,
7101 funding_tx_confirmed_in: None,
7102 funding_tx_confirmation_height: 0,
7103 short_channel_id: None,
7104 channel_creation_height: current_chain_height,
7106 feerate_per_kw: msg.feerate_per_kw,
7107 channel_value_satoshis: msg.funding_satoshis,
7108 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7109 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7110 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7111 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7112 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7113 holder_selected_channel_reserve_satoshis,
7114 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7115 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7116 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7117 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7120 counterparty_forwarding_info: None,
7122 channel_transaction_parameters: ChannelTransactionParameters {
7123 holder_pubkeys: pubkeys,
7124 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7125 is_outbound_from_holder: false,
7126 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7127 selected_contest_delay: msg.to_self_delay,
7128 pubkeys: counterparty_pubkeys,
7130 funding_outpoint: None,
7131 channel_type_features: channel_type.clone()
7133 funding_transaction: None,
7134 is_batch_funding: None,
7136 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7137 counterparty_prev_commitment_point: None,
7138 counterparty_node_id,
7140 counterparty_shutdown_scriptpubkey,
7142 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7144 channel_update_status: ChannelUpdateStatus::Enabled,
7145 closing_signed_in_flight: false,
7147 announcement_sigs: None,
7149 #[cfg(any(test, fuzzing))]
7150 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7151 #[cfg(any(test, fuzzing))]
7152 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7154 workaround_lnd_bug_4006: None,
7155 sent_message_awaiting_response: None,
7157 latest_inbound_scid_alias: None,
7158 outbound_scid_alias: 0,
7160 channel_pending_event_emitted: false,
7161 channel_ready_event_emitted: false,
7163 #[cfg(any(test, fuzzing))]
7164 historical_inbound_htlc_fulfills: HashSet::new(),
7169 blocked_monitor_updates: Vec::new(),
7171 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7177 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7178 /// should be sent back to the counterparty node.
7180 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7181 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7182 if self.context.is_outbound() {
7183 panic!("Tried to send accept_channel for an outbound channel?");
7186 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7187 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7189 panic!("Tried to send accept_channel after channel had moved forward");
7191 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7192 panic!("Tried to send an accept_channel for a channel that has already advanced");
7195 self.generate_accept_channel_message()
7198 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7199 /// inbound channel. If the intention is to accept an inbound channel, use
7200 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7202 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7203 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7204 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7205 let keys = self.context.get_holder_pubkeys();
7207 msgs::AcceptChannel {
7208 temporary_channel_id: self.context.channel_id,
7209 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7210 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7211 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7212 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7213 minimum_depth: self.context.minimum_depth.unwrap(),
7214 to_self_delay: self.context.get_holder_selected_contest_delay(),
7215 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7216 funding_pubkey: keys.funding_pubkey,
7217 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7218 payment_point: keys.payment_point,
7219 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7220 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7221 first_per_commitment_point,
7222 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7223 Some(script) => script.clone().into_inner(),
7224 None => Builder::new().into_script(),
7226 channel_type: Some(self.context.channel_type.clone()),
7228 next_local_nonce: None,
7232 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7233 /// inbound channel without accepting it.
7235 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7237 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7238 self.generate_accept_channel_message()
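/// Re-derives the holder's initial commitment transaction and verifies the counterparty's
/// signature on it (as received in their `funding_created`) against the funding redeemscript,
/// returning the commitment transaction on success.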
7241 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7242 let funding_script = self.context.get_funding_redeemscript();
7244 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7245 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7246 let trusted_tx = initial_commitment_tx.trust();
7247 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7248 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7249 // They sign the holder commitment transaction...
7250 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7251 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7252 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7253 encode::serialize_hex(&funding_script), &self.context.channel_id());
7254 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7256 Ok(initial_commitment_tx)
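/// Handles the counterparty's `funding_created` message: records the funding outpoint, checks
/// their signature on our initial commitment transaction, builds the initial `ChannelMonitor`,
/// and promotes this into a full [`Channel`], returning it along with an optional
/// `funding_signed` reply (`None` if the signature isn't yet available) and the new monitor.
/// On failure, the unfunded channel is handed back together with the error.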
7259 pub fn funding_created<L: Deref>(
7260 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7261 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7265 if self.context.is_outbound() {
7266 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7269 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7270 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7272 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7273 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7275 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7277 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7278 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7279 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7280 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7283 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7284 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7285 // This is an externally observable change before we finish all our checks. In particular
7286 // check_funding_created_signature may fail.
7287 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7289 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7291 Err(ChannelError::Close(e)) => {
7292 self.context.channel_transaction_parameters.funding_outpoint = None;
7293 return Err((self, ChannelError::Close(e)));
7296 // The only error we know how to handle is ChannelError::Close, so we fall over here
7297 // to make sure we don't continue with an inconsistent state.
7298 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7302 let holder_commitment_tx = HolderCommitmentTransaction::new(
7303 initial_commitment_tx,
7306 &self.context.get_holder_pubkeys().funding_pubkey,
7307 self.context.counterparty_funding_pubkey()
7310 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7311 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7314 // Now that we're past error-generating stuff, update our local state:
7316 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7317 self.context.channel_id = funding_txo.to_channel_id();
7318 self.context.cur_counterparty_commitment_transaction_number -= 1;
7319 self.context.cur_holder_commitment_transaction_number -= 1;
7321 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7323 let funding_redeemscript = self.context.get_funding_redeemscript();
7324 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7325 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7326 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
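// Build the initial `ChannelMonitor` with its own copy of the signer and immediately hand it
// the counterparty's first commitment transaction so it can watch the chain for this channel.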
7327 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7328 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7329 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7330 shutdown_script, self.context.get_holder_selected_contest_delay(),
7331 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7332 &self.context.channel_transaction_parameters,
7333 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7335 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7336 channel_monitor.provide_initial_counterparty_commitment_tx(
7337 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7338 self.context.cur_counterparty_commitment_transaction_number + 1,
7339 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7340 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7341 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7343 log_info!(logger, "{} funding_signed for peer for channel {}",
7344 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7346 // Promote the channel to a full-fledged one now that we have updated the state and have a
7347 // `ChannelMonitor`.
7348 let mut channel = Channel {
7349 context: self.context,
7351 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7352 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7354 Ok((channel, funding_signed, channel_monitor))
7358 const SERIALIZATION_VERSION: u8 = 3;
7359 const MIN_SERIALIZATION_VERSION: u8 = 3;
7361 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7367 impl Writeable for ChannelUpdateStatus {
7368 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7369 // We only care about writing out the current state as it was announced, i.e. only either
7370 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7371 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7373 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7374 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7375 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7376 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7382 impl Readable for ChannelUpdateStatus {
7383 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7384 Ok(match <u8 as Readable>::read(reader)? {
7385 0 => ChannelUpdateStatus::Enabled,
7386 1 => ChannelUpdateStatus::Disabled,
7387 _ => return Err(DecodeError::InvalidValue),
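// Note that, per the write side above, the staged variants intentionally collapse to their
// last-announced state across a serialization round-trip.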
7392 impl Writeable for AnnouncementSigsState {
7393 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7394 // We only care about writing out the current state as if we had just disconnected, at
7395 // which point we always set anything but PeerReceived to NotSent.
7397 AnnouncementSigsState::NotSent => 0u8.write(writer),
7398 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7399 AnnouncementSigsState::Committed => 0u8.write(writer),
7400 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7405 impl Readable for AnnouncementSigsState {
7406 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7407 Ok(match <u8 as Readable>::read(reader)? {
7408 0 => AnnouncementSigsState::NotSent,
7409 1 => AnnouncementSigsState::PeerReceived,
7410 _ => return Err(DecodeError::InvalidValue),
7415 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7416 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7417 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7420 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
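// The fields below are written in the fixed legacy order expected by older readers; everything
// added since is carried in the `write_tlv_fields!` block at the end of this function.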
7422 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7423 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7424 // the low bytes now and the optional high bytes later.
7425 let user_id_low = self.context.user_id as u64;
7426 user_id_low.write(writer)?;
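// For example, a `user_id` of `(7u128 << 64) | 42` is written as 42 here and 7 later (as the
// optional TLV of type 25); readers reassemble it as `42u128 | (7u128 << 64)`.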
7428 // Version 1 deserializers expected to read parts of the config object here. Version 2
7429 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7430 // `minimum_depth` we simply write dummy values here.
7431 writer.write_all(&[0; 8])?;
7433 self.context.channel_id.write(writer)?;
7435 let mut channel_state = self.context.channel_state;
7436 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7437 channel_state.set_peer_disconnected();
7439 channel_state.to_u32().write(writer)?;
7441 self.context.channel_value_satoshis.write(writer)?;
7443 self.context.latest_monitor_update_id.write(writer)?;
7445 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7446 // deserialized from that format.
7447 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7448 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7449 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7451 self.context.destination_script.write(writer)?;
7453 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7454 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7455 self.context.value_to_self_msat.write(writer)?;
7457 let mut dropped_inbound_htlcs = 0;
7458 for htlc in self.context.pending_inbound_htlcs.iter() {
7459 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7460 dropped_inbound_htlcs += 1;
7463 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7464 for htlc in self.context.pending_inbound_htlcs.iter() {
7465 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7468 htlc.htlc_id.write(writer)?;
7469 htlc.amount_msat.write(writer)?;
7470 htlc.cltv_expiry.write(writer)?;
7471 htlc.payment_hash.write(writer)?;
7473 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7474 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7476 htlc_state.write(writer)?;
7478 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7480 htlc_state.write(writer)?;
7482 &InboundHTLCState::Committed => {
7485 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7487 removal_reason.write(writer)?;
7492 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7493 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7494 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7496 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7497 for htlc in self.context.pending_outbound_htlcs.iter() {
7498 htlc.htlc_id.write(writer)?;
7499 htlc.amount_msat.write(writer)?;
7500 htlc.cltv_expiry.write(writer)?;
7501 htlc.payment_hash.write(writer)?;
7502 htlc.source.write(writer)?;
7504 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7506 onion_packet.write(writer)?;
7508 &OutboundHTLCState::Committed => {
7511 &OutboundHTLCState::RemoteRemoved(_) => {
7512 // Treat this as a Committed because we haven't received the CS - they'll
7513 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7516 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7518 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7519 preimages.push(preimage);
7521 let reason: Option<&HTLCFailReason> = outcome.into();
7522 reason.write(writer)?;
7524 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7526 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7527 preimages.push(preimage);
7529 let reason: Option<&HTLCFailReason> = outcome.into();
7530 reason.write(writer)?;
7533 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7534 pending_outbound_blinding_points.push(htlc.blinding_point);
7537 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7538 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7539 // Vec of (htlc_id, failure_code, sha256_of_onion)
7540 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7541 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7542 for update in self.context.holding_cell_htlc_updates.iter() {
7544 &HTLCUpdateAwaitingACK::AddHTLC {
7545 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7546 blinding_point, skimmed_fee_msat,
7549 amount_msat.write(writer)?;
7550 cltv_expiry.write(writer)?;
7551 payment_hash.write(writer)?;
7552 source.write(writer)?;
7553 onion_routing_packet.write(writer)?;
7555 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7556 holding_cell_blinding_points.push(blinding_point);
7558 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7560 payment_preimage.write(writer)?;
7561 htlc_id.write(writer)?;
7563 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7565 htlc_id.write(writer)?;
7566 err_packet.write(writer)?;
7568 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7569 htlc_id, failure_code, sha256_of_onion
7571 // We don't want to break downgrading by adding a new variant, so write a dummy
7572 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7573 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7575 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7577 htlc_id.write(writer)?;
7578 dummy_err_packet.write(writer)?;
7583 match self.context.resend_order {
7584 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7585 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7588 self.context.monitor_pending_channel_ready.write(writer)?;
7589 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7590 self.context.monitor_pending_commitment_signed.write(writer)?;
7592 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7593 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7594 pending_forward.write(writer)?;
7595 htlc_id.write(writer)?;
7598 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7599 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7600 htlc_source.write(writer)?;
7601 payment_hash.write(writer)?;
7602 fail_reason.write(writer)?;
7605 if self.context.is_outbound() {
7606 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7607 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7608 Some(feerate).write(writer)?;
7610 // As for inbound HTLCs, if the update was only announced and never committed in a
7611 // commitment_signed, drop it.
7612 None::<u32>.write(writer)?;
7614 self.context.holding_cell_update_fee.write(writer)?;
7616 self.context.next_holder_htlc_id.write(writer)?;
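// Roll back `next_counterparty_htlc_id` by the number of `RemoteAnnounced` inbound HTLCs
// dropped above: they were never committed, so the peer will re-announce them on reconnect.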
7617 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7618 self.context.update_time_counter.write(writer)?;
7619 self.context.feerate_per_kw.write(writer)?;
7621 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7622 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7623 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7624 // consider the stale state on reload.
7627 self.context.funding_tx_confirmed_in.write(writer)?;
7628 self.context.funding_tx_confirmation_height.write(writer)?;
7629 self.context.short_channel_id.write(writer)?;
7631 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7632 self.context.holder_dust_limit_satoshis.write(writer)?;
7633 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7635 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7636 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7638 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7639 self.context.holder_htlc_minimum_msat.write(writer)?;
7640 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7642 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7643 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7645 match &self.context.counterparty_forwarding_info {
7648 info.fee_base_msat.write(writer)?;
7649 info.fee_proportional_millionths.write(writer)?;
7650 info.cltv_expiry_delta.write(writer)?;
7652 None => 0u8.write(writer)?
7655 self.context.channel_transaction_parameters.write(writer)?;
7656 self.context.funding_transaction.write(writer)?;
7658 self.context.counterparty_cur_commitment_point.write(writer)?;
7659 self.context.counterparty_prev_commitment_point.write(writer)?;
7660 self.context.counterparty_node_id.write(writer)?;
7662 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7664 self.context.commitment_secrets.write(writer)?;
7666 self.context.channel_update_status.write(writer)?;
7668 #[cfg(any(test, fuzzing))]
7669 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7670 #[cfg(any(test, fuzzing))]
7671 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7672 htlc.write(writer)?;
7675 // If the channel type is something other than only-static-remote-key, then we need to have
7676 // older clients fail to deserialize this channel at all. If the type is
7677 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7679 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7680 Some(&self.context.channel_type) } else { None };
7682 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7683 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a
7684 // different percentage of the channel value than 10%, which older versions of LDK used
7685 // to set it to before the percentage was made configurable.
7686 let serialized_holder_selected_reserve =
7687 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7688 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7690 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7691 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7692 let serialized_holder_htlc_max_in_flight =
7693 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7694 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7696 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7697 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7699 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7700 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7701 // we write the high bytes as an option here.
7702 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7704 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7706 write_tlv_fields!(writer, {
7707 (0, self.context.announcement_sigs, option),
7708 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7709 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7710 // them twice, once with their original default values above, and once as an option
7711 // here. On the read side, old versions will simply ignore the odd-type entries here,
7712 // and new versions map the default values to None and allow the TLV entries here to override them.
7714 (1, self.context.minimum_depth, option),
7715 (2, chan_type, option),
7716 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7717 (4, serialized_holder_selected_reserve, option),
7718 (5, self.context.config, required),
7719 (6, serialized_holder_htlc_max_in_flight, option),
7720 (7, self.context.shutdown_scriptpubkey, option),
7721 (8, self.context.blocked_monitor_updates, optional_vec),
7722 (9, self.context.target_closing_feerate_sats_per_kw, option),
7723 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7724 (13, self.context.channel_creation_height, required),
7725 (15, preimages, required_vec),
7726 (17, self.context.announcement_sigs_state, required),
7727 (19, self.context.latest_inbound_scid_alias, option),
7728 (21, self.context.outbound_scid_alias, required),
7729 (23, channel_ready_event_emitted, option),
7730 (25, user_id_high_opt, option),
7731 (27, self.context.channel_keys_id, required),
7732 (28, holder_max_accepted_htlcs, option),
7733 (29, self.context.temporary_channel_id, option),
7734 (31, channel_pending_event_emitted, option),
7735 (35, pending_outbound_skimmed_fees, optional_vec),
7736 (37, holding_cell_skimmed_fees, optional_vec),
7737 (38, self.context.is_batch_funding, option),
7738 (39, pending_outbound_blinding_points, optional_vec),
7739 (41, holding_cell_blinding_points, optional_vec),
7740 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7747 const MAX_ALLOC_SIZE: usize = 64*1024;
7748 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7750 ES::Target: EntropySource,
7751 SP::Target: SignerProvider
7753 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7754 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7755 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
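// The reads below mirror the write order in `Writeable for Channel` above: the fixed legacy
// fields first, then the TLV stream, after which the `ChannelContext` is reassembled.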
7757 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7758 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7759 // the low bytes now and the high bytes later.
7760 let user_id_low: u64 = Readable::read(reader)?;
7762 let mut config = Some(LegacyChannelConfig::default());
7764 // Read the old serialization of the ChannelConfig from version 0.0.98.
7765 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7766 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7767 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7768 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7770 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7771 let mut _val: u64 = Readable::read(reader)?;
7774 let channel_id = Readable::read(reader)?;
7775 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7776 let channel_value_satoshis = Readable::read(reader)?;
7778 let latest_monitor_update_id = Readable::read(reader)?;
7780 let mut keys_data = None;
7782 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7783 // the `channel_keys_id` TLV is present below.
7784 let keys_len: u32 = Readable::read(reader)?;
7785 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7786 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7787 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7788 let mut data = [0; 1024];
7789 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7790 reader.read_exact(read_slice)?;
7791 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7795 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7796 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7797 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7800 let destination_script = Readable::read(reader)?;
7802 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7803 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7804 let value_to_self_msat = Readable::read(reader)?;
7806 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7808 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7809 for _ in 0..pending_inbound_htlc_count {
7810 pending_inbound_htlcs.push(InboundHTLCOutput {
7811 htlc_id: Readable::read(reader)?,
7812 amount_msat: Readable::read(reader)?,
7813 cltv_expiry: Readable::read(reader)?,
7814 payment_hash: Readable::read(reader)?,
7815 state: match <u8 as Readable>::read(reader)? {
7816 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7817 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7818 3 => InboundHTLCState::Committed,
7819 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7820 _ => return Err(DecodeError::InvalidValue),
7825 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7826 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7827 for _ in 0..pending_outbound_htlc_count {
7828 pending_outbound_htlcs.push(OutboundHTLCOutput {
7829 htlc_id: Readable::read(reader)?,
7830 amount_msat: Readable::read(reader)?,
7831 cltv_expiry: Readable::read(reader)?,
7832 payment_hash: Readable::read(reader)?,
7833 source: Readable::read(reader)?,
7834 state: match <u8 as Readable>::read(reader)? {
7835 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7836 1 => OutboundHTLCState::Committed,
7838 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7839 OutboundHTLCState::RemoteRemoved(option.into())
7842 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7843 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7846 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7847 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7849 _ => return Err(DecodeError::InvalidValue),
7851 skimmed_fee_msat: None,
7852 blinding_point: None,
7856 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7857 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7858 for _ in 0..holding_cell_htlc_update_count {
7859 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7860 0 => HTLCUpdateAwaitingACK::AddHTLC {
7861 amount_msat: Readable::read(reader)?,
7862 cltv_expiry: Readable::read(reader)?,
7863 payment_hash: Readable::read(reader)?,
7864 source: Readable::read(reader)?,
7865 onion_routing_packet: Readable::read(reader)?,
7866 skimmed_fee_msat: None,
7867 blinding_point: None,
7869 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7870 payment_preimage: Readable::read(reader)?,
7871 htlc_id: Readable::read(reader)?,
7873 2 => HTLCUpdateAwaitingACK::FailHTLC {
7874 htlc_id: Readable::read(reader)?,
7875 err_packet: Readable::read(reader)?,
7877 _ => return Err(DecodeError::InvalidValue),
7881 let resend_order = match <u8 as Readable>::read(reader)? {
7882 0 => RAACommitmentOrder::CommitmentFirst,
7883 1 => RAACommitmentOrder::RevokeAndACKFirst,
7884 _ => return Err(DecodeError::InvalidValue),
7887 let monitor_pending_channel_ready = Readable::read(reader)?;
7888 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7889 let monitor_pending_commitment_signed = Readable::read(reader)?;
7891 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7892 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7893 for _ in 0..monitor_pending_forwards_count {
7894 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7897 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7898 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7899 for _ in 0..monitor_pending_failures_count {
7900 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7903 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7905 let holding_cell_update_fee = Readable::read(reader)?;
7907 let next_holder_htlc_id = Readable::read(reader)?;
7908 let next_counterparty_htlc_id = Readable::read(reader)?;
7909 let update_time_counter = Readable::read(reader)?;
7910 let feerate_per_kw = Readable::read(reader)?;
7912 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7913 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7914 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7915 // consider the stale state on reload.
7916 match <u8 as Readable>::read(reader)? {
7919 let _: u32 = Readable::read(reader)?;
7920 let _: u64 = Readable::read(reader)?;
7921 let _: Signature = Readable::read(reader)?;
7923 _ => return Err(DecodeError::InvalidValue),
7926 let funding_tx_confirmed_in = Readable::read(reader)?;
7927 let funding_tx_confirmation_height = Readable::read(reader)?;
7928 let short_channel_id = Readable::read(reader)?;
7930 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7931 let holder_dust_limit_satoshis = Readable::read(reader)?;
7932 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7933 let mut counterparty_selected_channel_reserve_satoshis = None;
7935 // Read the old serialization from version 0.0.98.
7936 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7938 // Read the 8 bytes of backwards-compatibility data.
7939 let _dummy: u64 = Readable::read(reader)?;
7941 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7942 let holder_htlc_minimum_msat = Readable::read(reader)?;
7943 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7945 let mut minimum_depth = None;
7947 // Read the old serialization from version 0.0.98.
7948 minimum_depth = Some(Readable::read(reader)?);
7950 // Read the 4 bytes of backwards-compatibility data.
7951 let _dummy: u32 = Readable::read(reader)?;
7954 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7956 1 => Some(CounterpartyForwardingInfo {
7957 fee_base_msat: Readable::read(reader)?,
7958 fee_proportional_millionths: Readable::read(reader)?,
7959 cltv_expiry_delta: Readable::read(reader)?,
7961 _ => return Err(DecodeError::InvalidValue),
7964 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7965 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7967 let counterparty_cur_commitment_point = Readable::read(reader)?;
7969 let counterparty_prev_commitment_point = Readable::read(reader)?;
7970 let counterparty_node_id = Readable::read(reader)?;
7972 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7973 let commitment_secrets = Readable::read(reader)?;
7975 let channel_update_status = Readable::read(reader)?;
7977 #[cfg(any(test, fuzzing))]
7978 let mut historical_inbound_htlc_fulfills = HashSet::new();
7979 #[cfg(any(test, fuzzing))]
7981 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7982 for _ in 0..htlc_fulfills_len {
7983 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7987 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7988 Some((feerate, if channel_parameters.is_outbound_from_holder {
7989 FeeUpdateState::Outbound
7991 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7997 let mut announcement_sigs = None;
7998 let mut target_closing_feerate_sats_per_kw = None;
7999 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8000 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8001 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8002 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8003 // only, so we default to that if none was written.
8004 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8005 let mut channel_creation_height = Some(serialized_height);
8006 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8008 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8009 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8010 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8011 let mut latest_inbound_scid_alias = None;
8012 let mut outbound_scid_alias = None;
8013 let mut channel_pending_event_emitted = None;
8014 let mut channel_ready_event_emitted = None;
8016 let mut user_id_high_opt: Option<u64> = None;
8017 let mut channel_keys_id: Option<[u8; 32]> = None;
8018 let mut temporary_channel_id: Option<ChannelId> = None;
8019 let mut holder_max_accepted_htlcs: Option<u16> = None;
8021 let mut blocked_monitor_updates = Some(Vec::new());
8023 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8024 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8026 let mut is_batch_funding: Option<()> = None;
8028 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8029 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8031 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8033 read_tlv_fields!(reader, {
8034 (0, announcement_sigs, option),
8035 (1, minimum_depth, option),
8036 (2, channel_type, option),
8037 (3, counterparty_selected_channel_reserve_satoshis, option),
8038 (4, holder_selected_channel_reserve_satoshis, option),
8039 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8040 (6, holder_max_htlc_value_in_flight_msat, option),
8041 (7, shutdown_scriptpubkey, option),
8042 (8, blocked_monitor_updates, optional_vec),
8043 (9, target_closing_feerate_sats_per_kw, option),
8044 (11, monitor_pending_finalized_fulfills, optional_vec),
8045 (13, channel_creation_height, option),
8046 (15, preimages_opt, optional_vec),
8047 (17, announcement_sigs_state, option),
8048 (19, latest_inbound_scid_alias, option),
8049 (21, outbound_scid_alias, option),
8050 (23, channel_ready_event_emitted, option),
8051 (25, user_id_high_opt, option),
8052 (27, channel_keys_id, option),
8053 (28, holder_max_accepted_htlcs, option),
8054 (29, temporary_channel_id, option),
8055 (31, channel_pending_event_emitted, option),
8056 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8057 (37, holding_cell_skimmed_fees_opt, optional_vec),
8058 (38, is_batch_funding, option),
8059 (39, pending_outbound_blinding_points_opt, optional_vec),
8060 (41, holding_cell_blinding_points_opt, optional_vec),
8061 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
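// Prefer re-deriving the signer from the `channel_keys_id` TLV when present; otherwise fall
// back to deserializing the legacy signer bytes read earlier.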
8064 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8065 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8066 // If we've gotten to the funding stage of the channel, populate the signer with its
8067 // required channel parameters.
8068 if channel_state >= ChannelState::FundingNegotiated {
8069 holder_signer.provide_channel_parameters(&channel_parameters);
8071 (channel_keys_id, holder_signer)
8073 // `keys_data` can be `None` if we had corrupted data.
8074 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8075 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8076 (holder_signer.channel_keys_id(), holder_signer)
8079 if let Some(preimages) = preimages_opt {
8080 let mut iter = preimages.into_iter();
8081 for htlc in pending_outbound_htlcs.iter_mut() {
8083 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8084 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8086 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8087 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8092 // We expect all preimages to be consumed above
8093 if iter.next().is_some() {
8094 return Err(DecodeError::InvalidValue);
8098 let chan_features = channel_type.as_ref().unwrap();
8099 if !chan_features.is_subset(our_supported_features) {
8100 // If the channel was written by a new version and negotiated with features we don't
8101 // understand yet, refuse to read it.
8102 return Err(DecodeError::UnknownRequiredFeature);
8105 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8106 // To account for that, we're proactively setting/overriding the field here.
8107 channel_parameters.channel_type_features = chan_features.clone();
8109 let mut secp_ctx = Secp256k1::new();
8110 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8112 // `user_id` used to be a single u64 value. In order to remain backwards
8113 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8114 // separate u64 values.
8115 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8117 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8119 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8120 let mut iter = skimmed_fees.into_iter();
8121 for htlc in pending_outbound_htlcs.iter_mut() {
8122 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8124 // We expect all skimmed fees to be consumed above
8125 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8127 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8128 let mut iter = skimmed_fees.into_iter();
8129 for htlc in holding_cell_htlc_updates.iter_mut() {
8130 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8131 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8134 // We expect all skimmed fees to be consumed above
8135 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8137 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8138 let mut iter = blinding_pts.into_iter();
8139 for htlc in pending_outbound_htlcs.iter_mut() {
8140 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8142 // We expect all blinding points to be consumed above
8143 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8145 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8146 let mut iter = blinding_pts.into_iter();
8147 for htlc in holding_cell_htlc_updates.iter_mut() {
8148 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8149 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8152 // We expect all blinding points to be consumed above
8153 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
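// Undo the downgrade-compatibility trick from the write path: any holding-cell `FailHTLC`
// written with an empty error packet and listed in the `malformed_htlcs` TLV is restored to
// its real `FailMalformedHTLC` form.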
8156 if let Some(malformed_htlcs) = malformed_htlcs {
8157 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8158 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8159 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8160 let matches = *htlc_id == malformed_htlc_id;
8161 if matches { debug_assert!(err_packet.data.is_empty()) }
8164 }).ok_or(DecodeError::InvalidValue)?;
8165 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8166 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8168 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8173 context: ChannelContext {
8176 config: config.unwrap(),
8180 // Note that we don't care about serializing handshake limits as we only ever serialize
8181 // channel data after the handshake has completed.
8182 inbound_handshake_limits_override: None,
8185 temporary_channel_id,
8187 announcement_sigs_state: announcement_sigs_state.unwrap(),
8189 channel_value_satoshis,
8191 latest_monitor_update_id,
8193 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8194 shutdown_scriptpubkey,
8197 cur_holder_commitment_transaction_number,
8198 cur_counterparty_commitment_transaction_number,
8201 holder_max_accepted_htlcs,
8202 pending_inbound_htlcs,
8203 pending_outbound_htlcs,
8204 holding_cell_htlc_updates,
8208 monitor_pending_channel_ready,
8209 monitor_pending_revoke_and_ack,
8210 monitor_pending_commitment_signed,
8211 monitor_pending_forwards,
8212 monitor_pending_failures,
8213 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8215 signer_pending_commitment_update: false,
8216 signer_pending_funding: false,
8219 holding_cell_update_fee,
8220 next_holder_htlc_id,
8221 next_counterparty_htlc_id,
8222 update_time_counter,
8225 #[cfg(debug_assertions)]
8226 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8227 #[cfg(debug_assertions)]
8228 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8230 last_sent_closing_fee: None,
8231 pending_counterparty_closing_signed: None,
8232 expecting_peer_commitment_signed: false,
8233 closing_fee_limits: None,
8234 target_closing_feerate_sats_per_kw,
8236 funding_tx_confirmed_in,
8237 funding_tx_confirmation_height,
8239 channel_creation_height: channel_creation_height.unwrap(),
8241 counterparty_dust_limit_satoshis,
8242 holder_dust_limit_satoshis,
8243 counterparty_max_htlc_value_in_flight_msat,
8244 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8245 counterparty_selected_channel_reserve_satoshis,
8246 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8247 counterparty_htlc_minimum_msat,
8248 holder_htlc_minimum_msat,
8249 counterparty_max_accepted_htlcs,
8252 counterparty_forwarding_info,
8254 channel_transaction_parameters: channel_parameters,
8255 funding_transaction,
8258 counterparty_cur_commitment_point,
8259 counterparty_prev_commitment_point,
8260 counterparty_node_id,
8262 counterparty_shutdown_scriptpubkey,
8266 channel_update_status,
8267 closing_signed_in_flight: false,
8271 #[cfg(any(test, fuzzing))]
8272 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8273 #[cfg(any(test, fuzzing))]
8274 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8276 workaround_lnd_bug_4006: None,
8277 sent_message_awaiting_response: None,
8279 latest_inbound_scid_alias,
8280 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
8281 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8283 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8284 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8286 #[cfg(any(test, fuzzing))]
8287 historical_inbound_htlc_fulfills,
8289 channel_type: channel_type.unwrap(),
8292 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8301 use bitcoin::blockdata::constants::ChainHash;
8302 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8303 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8304 use bitcoin::blockdata::opcodes;
8305 use bitcoin::network::constants::Network;
8306 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8307 use crate::ln::{PaymentHash, PaymentPreimage};
8308 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8309 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8310 use crate::ln::channel::InitFeatures;
8311 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8312 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8313 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8314 use crate::ln::msgs;
8315 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8316 use crate::ln::script::ShutdownScript;
8317 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8318 use crate::chain::BestBlock;
8319 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8320 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8321 use crate::chain::transaction::OutPoint;
8322 use crate::routing::router::{Path, RouteHop};
8323 use crate::util::config::UserConfig;
8324 use crate::util::errors::APIError;
8325 use crate::util::ser::{ReadableArgs, Writeable};
8326 use crate::util::test_utils;
8327 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8328 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8329 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8330 use bitcoin::secp256k1::{SecretKey,PublicKey};
8331 use bitcoin::hashes::sha256::Hash as Sha256;
8332 use bitcoin::hashes::Hash;
8333 use bitcoin::hashes::hex::FromHex;
8334 use bitcoin::hash_types::WPubkeyHash;
8335 use bitcoin::blockdata::locktime::absolute::LockTime;
8336 use bitcoin::address::{WitnessProgram, WitnessVersion};
8337 use crate::prelude::*;
8339 struct TestFeeEstimator {
8342 impl FeeEstimator for TestFeeEstimator {
8343 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8349 fn test_max_funding_satoshis_no_wumbo() {
8350 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8351 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8352 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8356 signer: InMemorySigner,
8359 impl EntropySource for Keys {
8360 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8363 impl SignerProvider for Keys {
8364 type EcdsaSigner = InMemorySigner;
8366 type TaprootSigner = InMemorySigner;
8368 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8369 self.signer.channel_keys_id()
8372 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8376 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8378 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8379 let secp_ctx = Secp256k1::signing_only();
8380 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8381 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8382 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8385 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8386 let secp_ctx = Secp256k1::signing_only();
8387 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8388 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8392 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8393 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8394 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8398 fn upfront_shutdown_script_incompatibility() {
8399 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8400 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8401 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8404 let seed = [42; 32];
8405 let network = Network::Testnet;
8406 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8407 keys_provider.expect(OnGetShutdownScriptpubkey {
8408 returns: non_v0_segwit_shutdown_script.clone(),
8411 let secp_ctx = Secp256k1::new();
8412 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8413 let config = UserConfig::default();
8414 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8415 Err(APIError::IncompatibleShutdownScript { script }) => {
8416 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8418 Err(e) => panic!("Unexpected error: {:?}", e),
8419 Ok(_) => panic!("Expected error"),
8423 // Check that, during channel creation, we use the same feerate in the open channel message
8424 // as we do in the Channel object creation itself.
8426 fn test_open_channel_msg_fee() {
8427 let original_fee = 253;
8428 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8429 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8430 let secp_ctx = Secp256k1::new();
8431 let seed = [42; 32];
8432 let network = Network::Testnet;
8433 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8435 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8436 let config = UserConfig::default();
8437 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8439 // Now change the fee so we can check that the fee in the open_channel message is the
8440 // same as the old fee.
8441 fee_est.fee_est = 500;
8442 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8443 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8447 fn test_holder_vs_counterparty_dust_limit() {
8448 // Test that when calculating the local and remote commitment transaction fees, the correct
8449 // dust limits are used.
8450 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8451 let secp_ctx = Secp256k1::new();
8452 let seed = [42; 32];
8453 let network = Network::Testnet;
8454 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8455 let logger = test_utils::TestLogger::new();
8456 let best_block = BestBlock::from_network(network);
8458 // Go through the flow of opening a channel between two nodes, making sure
8459 // they have different dust limits.
8461 // Create Node A's channel pointing to Node B's pubkey
8462 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8463 let config = UserConfig::default();
8464 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8466 // Create Node B's channel by receiving Node A's open_channel message
8467 // Make sure A's dust limit is as we expect.
8468 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8469 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8470 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8472 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8473 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8474 accept_channel_msg.dust_limit_satoshis = 546;
8475 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8476 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8478 // Node A --> Node B: funding created
8479 let output_script = node_a_chan.context.get_funding_redeemscript();
8480 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8481 value: 10000000, script_pubkey: output_script.clone(),
8483 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8484 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8485 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8487 // Node B --> Node A: funding signed
8488 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8489 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8491 // Put some inbound and outbound HTLCs in A's channel.
8492 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8493 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8495 amount_msat: htlc_amount_msat,
8496 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8497 cltv_expiry: 300000000,
8498 state: InboundHTLCState::Committed,
8501 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8503 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8504 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8505 cltv_expiry: 200000000,
8506 state: OutboundHTLCState::Committed,
8507 source: HTLCSource::OutboundRoute {
8508 path: Path { hops: Vec::new(), blinded_tail: None },
8509 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8510 first_hop_htlc_msat: 548,
8511 payment_id: PaymentId([42; 32]),
8513 skimmed_fee_msat: None,
8514 blinding_point: None,
8517 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8518 // the dust limit check.
8519 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8520 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8521 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8522 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
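// Hedged sketch of why the equality above holds: `commit_tx_fee_msat` scales linearly with the
// number of *non-dust* HTLCs, and every HTLC here is trimmed on A's commitment, so the projected
// fee matches the zero-HTLC fee. Even a single non-dust HTLC would raise it:
assert!(commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 1, node_a_chan.context.get_channel_type()) > local_commit_fee_0_htlcs);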
8524 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8525 // of the HTLCs are seen to be above the dust limit.
8526 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8527 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8528 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8529 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8530 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
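// A hedged restatement of the threshold exercised above (assuming the default, non-anchor
// channel type): an offered HTLC is trimmed from the broadcaster's commitment transaction
// when its value is below the broadcaster's dust limit plus the fee for the second-stage
// HTLC-timeout transaction at the current feerate. With A's dust limit bumped to 1560 sat the
// HTLC stays under A's threshold, while B's 546 sat limit keeps it above B's.
let a_dust_threshold_msat = (node_a_chan.context.holder_dust_limit_satoshis
	+ node_a_chan.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(node_a_chan.context.get_channel_type()) / 1000) * 1000;
assert!(htlc_amount_msat < a_dust_threshold_msat);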
8534 fn test_timeout_vs_success_htlc_dust_limit() {
8535 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8536 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8537 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8538 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8539 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8540 let secp_ctx = Secp256k1::new();
8541 let seed = [42; 32];
8542 let network = Network::Testnet;
8543 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8545 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8546 let config = UserConfig::default();
8547 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8549 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8550 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8552 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8553 // counted as dust when it shouldn't be.
8554 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8555 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8556 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8557 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8559 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8560 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8561 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8562 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8563 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8565 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8567 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8568 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8569 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8570 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8571 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8573 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8574 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8575 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8576 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8577 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
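// Hedged sanity sketch: the HTLC-success transaction is heavier than the HTLC-timeout
// transaction for both the non-anchor and anchor weight variants, which is exactly why
// swapping the two weights would flip the dust classification of the amounts chosen above.
assert!(htlc_success_tx_weight(chan.context.get_channel_type()) > htlc_timeout_tx_weight(chan.context.get_channel_type()));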
8581 fn channel_reestablish_no_updates() {
8582 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8583 let logger = test_utils::TestLogger::new();
8584 let secp_ctx = Secp256k1::new();
8585 let seed = [42; 32];
8586 let network = Network::Testnet;
8587 let best_block = BestBlock::from_network(network);
8588 let chain_hash = ChainHash::using_genesis_block(network);
8589 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8591 // Go through the flow of opening a channel between two nodes.
8593 // Create Node A's channel pointing to Node B's pubkey
8594 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8595 let config = UserConfig::default();
8596 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8598 // Create Node B's channel by receiving Node A's open_channel message
8599 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8600 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8601 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8603 // Node B --> Node A: accept channel
8604 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8605 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8607 // Node A --> Node B: funding created
8608 let output_script = node_a_chan.context.get_funding_redeemscript();
8609 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8610 value: 10000000, script_pubkey: output_script.clone(),
8612 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8613 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8614 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8616 // Node B --> Node A: funding signed
8617 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8618 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8620 // Now disconnect the two nodes and check that the commitment point in
8621 // Node B's channel_reestablish message is sane.
8622 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8623 let msg = node_b_chan.get_channel_reestablish(&&logger);
8624 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8625 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8626 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8628 // Check that the commitment point in Node A's channel_reestablish message is sane.
8630 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8631 let msg = node_a_chan.get_channel_reestablish(&&logger);
8632 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8633 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8634 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
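// For context (a hedged note per BOLT 2, not taken from the original comments): right after
// funding, with no commitment updates exchanged, each side expects the counterparty's signature
// for commitment number 1 next and has received no revocations, so `next_local_commitment_number`
// is 1, `next_remote_commitment_number` is 0, and `your_last_per_commitment_secret` is all-zero.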
8638 fn test_configured_holder_max_htlc_value_in_flight() {
8639 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8640 let logger = test_utils::TestLogger::new();
8641 let secp_ctx = Secp256k1::new();
8642 let seed = [42; 32];
8643 let network = Network::Testnet;
8644 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8645 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8646 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8648 let mut config_2_percent = UserConfig::default();
8649 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8650 let mut config_99_percent = UserConfig::default();
8651 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8652 let mut config_0_percent = UserConfig::default();
8653 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8654 let mut config_101_percent = UserConfig::default();
8655 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8657 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8658 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8659 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8660 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8661 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8662 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8664 // Test with the upper bound - 1 of valid values (99%).
8665 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8666 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8667 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8669 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8671 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8672 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8673 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8674 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8675 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8676 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8678 // Test with the upper bound - 1 of valid values (99%).
8679 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8680 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8681 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8683 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8684 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8685 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8686 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8687 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8689 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8690 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value than 100.
8692 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8693 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8694 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8696 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8697 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8698 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8699 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8700 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8702 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8703 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value than 100.
8705 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8706 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8707 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
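// A hedged summary of the rule exercised above: the configured percentage is clamped to the
// [1, 100] range before being applied, i.e. roughly
//     holder_max_htlc_value_in_flight_msat
//         = channel_value_msat * clamp(max_inbound_htlc_value_in_flight_percent_of_channel, 1, 100) / 100
// so 0% falls back to 1% and 101% is capped at the full channel value.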
8711 fn test_configured_holder_selected_channel_reserve_satoshis() {
8713 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8714 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8715 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8717 // Test with valid but unreasonably high channel reserves
8718 // The requesting and accepting parties request 49%-49% and 60%-30% channel reserves, respectively.
8719 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8720 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8722 // Test with a calculated channel reserve below the lower bound,
8723 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8724 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8726 // Test with invalid channel reserves since the sum of both is greater than or equal to the channel value.
8728 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8729 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8732 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8733 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8734 let logger = test_utils::TestLogger::new();
8735 let secp_ctx = Secp256k1::new();
8736 let seed = [42; 32];
8737 let network = Network::Testnet;
8738 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8739 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8740 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8743 let mut outbound_node_config = UserConfig::default();
8744 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8745 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8747 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8748 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8750 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8751 let mut inbound_node_config = UserConfig::default();
8752 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8754 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8755 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8757 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8759 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8760 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8762 // Channel negotiation fails since the combined reserves meet or exceed the channel value.
8763 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8764 assert!(result.is_err());
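// A hedged summary of the reserve selection checked above: each side picks
//     max(MIN_THEIR_CHAN_RESERVE_SATOSHIS,
//         channel_value_satoshis * their_channel_reserve_proportional_millionths / 1_000_000)
// and the inbound side rejects the open when the two reserves sum to the full channel value
// or more, which is the error path taken in this branch.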
8769 fn channel_update() {
8770 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8771 let logger = test_utils::TestLogger::new();
8772 let secp_ctx = Secp256k1::new();
8773 let seed = [42; 32];
8774 let network = Network::Testnet;
8775 let best_block = BestBlock::from_network(network);
8776 let chain_hash = ChainHash::using_genesis_block(network);
8777 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8779 // Create Node A's channel pointing to Node B's pubkey
8780 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8781 let config = UserConfig::default();
8782 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8784 // Create Node B's channel by receiving Node A's open_channel message
8785 // Make sure A's dust limit is as we expect.
8786 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8787 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8788 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8790 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8791 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8792 accept_channel_msg.dust_limit_satoshis = 546;
8793 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8794 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8796 // Node A --> Node B: funding created
8797 let output_script = node_a_chan.context.get_funding_redeemscript();
8798 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8799 value: 10000000, script_pubkey: output_script.clone(),
8801 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8802 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8803 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8805 // Node B --> Node A: funding signed
8806 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8807 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8809 // Make sure that receiving a channel update will update the Channel as expected.
8810 let update = ChannelUpdate {
8811 contents: UnsignedChannelUpdate {
8813 short_channel_id: 0,
8816 cltv_expiry_delta: 100,
8817 htlc_minimum_msat: 5,
8818 htlc_maximum_msat: MAX_VALUE_MSAT,
8820 fee_proportional_millionths: 11,
8821 excess_data: Vec::new(),
8823 signature: Signature::from(unsafe { FFISignature::new() })
8825 assert!(node_a_chan.channel_update(&update).unwrap());
8827 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8828 // change our official htlc_minimum_msat.
8829 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8830 match node_a_chan.context.counterparty_forwarding_info() {
8832 assert_eq!(info.cltv_expiry_delta, 100);
8833 assert_eq!(info.fee_base_msat, 110);
8834 assert_eq!(info.fee_proportional_millionths, 11);
8836 None => panic!("expected counterparty forwarding info to be Some")
8839 assert!(!node_a_chan.channel_update(&update).unwrap());
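// As the two asserts above demonstrate, `channel_update` reports whether anything changed:
// the first application of the update returns true (forwarding info recorded), while
// re-applying the identical update returns false.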
8843 fn blinding_point_skimmed_fee_malformed_ser() {
8844 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized properly.
8846 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8847 let secp_ctx = Secp256k1::new();
8848 let seed = [42; 32];
8849 let network = Network::Testnet;
8850 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8852 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8853 let config = UserConfig::default();
8854 let features = channelmanager::provided_init_features(&config);
8855 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8856 let mut chan = Channel { context: outbound_chan.context };
8858 let dummy_htlc_source = HTLCSource::OutboundRoute {
8860 hops: vec![RouteHop {
8861 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8862 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8863 cltv_expiry_delta: 0, maybe_announced_channel: false,
8867 session_priv: test_utils::privkey(42),
8868 first_hop_htlc_msat: 0,
8869 payment_id: PaymentId([42; 32]),
8871 let dummy_outbound_output = OutboundHTLCOutput {
8874 payment_hash: PaymentHash([43; 32]),
8876 state: OutboundHTLCState::Committed,
8877 source: dummy_htlc_source.clone(),
8878 skimmed_fee_msat: None,
8879 blinding_point: None,
8881 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8882 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8884 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8887 htlc.skimmed_fee_msat = Some(1);
8890 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8892 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8895 payment_hash: PaymentHash([43; 32]),
8896 source: dummy_htlc_source.clone(),
8897 onion_routing_packet: msgs::OnionPacket {
8899 public_key: Ok(test_utils::pubkey(1)),
8900 hop_data: [0; 20*65],
8903 skimmed_fee_msat: None,
8904 blinding_point: None,
8906 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8907 payment_preimage: PaymentPreimage([42; 32]),
8910 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
8911 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
8913 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
8914 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
8916 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
8919 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8920 } else if i % 5 == 1 {
8921 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8922 } else if i % 5 == 2 {
8923 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8924 if let HTLCUpdateAwaitingACK::AddHTLC {
8925 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8926 } = &mut dummy_add {
8927 *blinding_point = Some(test_utils::pubkey(42 + i));
8928 *skimmed_fee_msat = Some(42);
8930 holding_cell_htlc_updates.push(dummy_add);
8931 } else if i % 5 == 3 {
8932 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
8934 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
8937 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8939 // Encode and decode the channel and ensure that the HTLCs within are the same.
8940 let encoded_chan = chan.encode();
8941 let mut s = crate::io::Cursor::new(&encoded_chan);
8942 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8943 let features = channelmanager::provided_channel_type_features(&config);
8944 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8945 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8946 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
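// The round trip above goes through the normal serialization path (`encode` plus `Channel::read`
// with the same keys provider), so the equality checks cover the newer optional fields this test
// is about: per-HTLC blinding points, skimmed fees, and the malformed-HTLC holding-cell variant.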
8949 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8951 fn outbound_commitment_test() {
8952 use bitcoin::sighash;
8953 use bitcoin::consensus::encode::serialize;
8954 use bitcoin::sighash::EcdsaSighashType;
8955 use bitcoin::hashes::hex::FromHex;
8956 use bitcoin::hash_types::Txid;
8957 use bitcoin::secp256k1::Message;
8958 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8959 use crate::ln::PaymentPreimage;
8960 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8961 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8962 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8963 use crate::util::logger::Logger;
8964 use crate::sync::Arc;
8965 use core::str::FromStr;
8966 use hex::DisplayHex;
8968 // Test vectors from BOLT 3 Appendices C and F (anchors):
8969 let feeest = TestFeeEstimator{fee_est: 15000};
8970 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
8971 let secp_ctx = Secp256k1::new();
8973 let mut signer = InMemorySigner::new(
8975 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8976 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8977 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8978 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8979 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8981 // These aren't set in the test vectors:
8982 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8988 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8989 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8990 let keys_provider = Keys { signer: signer.clone() };
8992 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8993 let mut config = UserConfig::default();
8994 config.channel_handshake_config.announced_channel = false;
8995 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8996 chan.context.holder_dust_limit_satoshis = 546;
8997 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8999 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9001 let counterparty_pubkeys = ChannelPublicKeys {
9002 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9003 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9004 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9005 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9006 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9008 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9009 CounterpartyChannelTransactionParameters {
9010 pubkeys: counterparty_pubkeys.clone(),
9011 selected_contest_delay: 144
9013 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9014 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9016 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9017 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9019 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9020 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9022 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9023 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9025 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9026 // derived from a commitment_seed, so instead we copy it here and call
9027 // build_commitment_transaction.
9028 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9029 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9030 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9031 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9032 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
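// Note (hedged, not from the original comments): the `TxCreationKeys` built here are the BOLT 3
// "local" transaction keys for the appendix state; the per-commitment point comes straight from
// the published per-commitment secret above rather than from our commitment seed, which is why
// `build_holder_transaction_keys` could not be used directly.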
9034 macro_rules! test_commitment {
9035 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9036 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9037 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9041 macro_rules! test_commitment_with_anchors {
9042 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9043 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9044 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9048 macro_rules! test_commitment_common {
9049 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9050 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9052 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9053 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9055 let htlcs = commitment_stats.htlcs_included.drain(..)
9056 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9058 (commitment_stats.tx, htlcs)
9060 let trusted_tx = commitment_tx.trust();
9061 let unsigned_tx = trusted_tx.built_transaction();
9062 let redeemscript = chan.context.get_funding_redeemscript();
9063 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9064 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9065 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9066 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9068 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9069 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9070 let mut counterparty_htlc_sigs = Vec::new();
9071 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9073 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9074 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9075 counterparty_htlc_sigs.push(remote_signature);
9077 assert_eq!(htlcs.len(), per_htlc.len());
9079 let holder_commitment_tx = HolderCommitmentTransaction::new(
9080 commitment_tx.clone(),
9081 counterparty_signature,
9082 counterparty_htlc_sigs,
9083 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9084 chan.context.counterparty_funding_pubkey()
9086 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9087 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9089 let funding_redeemscript = chan.context.get_funding_redeemscript();
9090 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9091 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9093 // ((htlc, counterparty_sig), (index, holder_sig))
9094 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9097 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9098 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9100 let ref htlc = htlcs[$htlc_idx];
9101 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9102 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9103 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9104 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9105 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9106 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9107 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9109 let mut preimage: Option<PaymentPreimage> = None;
9112 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9113 if out == htlc.payment_hash {
9114 preimage = Some(PaymentPreimage([i; 32]));
9118 assert!(preimage.is_some());
9121 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9122 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9123 channel_derivation_parameters: ChannelDerivationParameters {
9124 value_satoshis: chan.context.channel_value_satoshis,
9125 keys_id: chan.context.channel_keys_id,
9126 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9128 commitment_txid: trusted_tx.txid(),
9129 per_commitment_number: trusted_tx.commitment_number(),
9130 per_commitment_point: trusted_tx.per_commitment_point(),
9131 feerate_per_kw: trusted_tx.feerate_per_kw(),
9133 preimage: preimage.clone(),
9134 counterparty_sig: *htlc_counterparty_sig,
9135 }, &secp_ctx).unwrap();
9136 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9137 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9139 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9140 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9141 let trusted_tx = holder_commitment_tx.trust();
9142 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9143 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9144 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9146 assert!(htlc_counterparty_sig_iter.next().is_none());
9150 // anchors: simple commitment tx with no HTLCs and single anchor
9151 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9152 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9153 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9155 // simple commitment tx with no HTLCs
9156 chan.context.value_to_self_msat = 7000000000;
9158 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9159 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9160 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9162 // anchors: simple commitment tx with no HTLCs
9163 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9164 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9165 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9167 chan.context.pending_inbound_htlcs.push({
9168 let mut out = InboundHTLCOutput{
9170 amount_msat: 1000000,
9172 payment_hash: PaymentHash([0; 32]),
9173 state: InboundHTLCState::Committed,
9175 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9178 chan.context.pending_inbound_htlcs.push({
9179 let mut out = InboundHTLCOutput{
9181 amount_msat: 2000000,
9183 payment_hash: PaymentHash([0; 32]),
9184 state: InboundHTLCState::Committed,
9186 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9189 chan.context.pending_outbound_htlcs.push({
9190 let mut out = OutboundHTLCOutput{
9192 amount_msat: 2000000,
9194 payment_hash: PaymentHash([0; 32]),
9195 state: OutboundHTLCState::Committed,
9196 source: HTLCSource::dummy(),
9197 skimmed_fee_msat: None,
9198 blinding_point: None,
9200 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9203 chan.context.pending_outbound_htlcs.push({
9204 let mut out = OutboundHTLCOutput{
9206 amount_msat: 3000000,
9208 payment_hash: PaymentHash([0; 32]),
9209 state: OutboundHTLCState::Committed,
9210 source: HTLCSource::dummy(),
9211 skimmed_fee_msat: None,
9212 blinding_point: None,
9214 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9217 chan.context.pending_inbound_htlcs.push({
9218 let mut out = InboundHTLCOutput{
9220 amount_msat: 4000000,
9222 payment_hash: PaymentHash([0; 32]),
9223 state: InboundHTLCState::Committed,
9225 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9229 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9230 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9231 chan.context.feerate_per_kw = 0;
9233 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9234 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9235 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9238 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9239 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9240 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9243 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9244 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9245 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9248 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9249 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9250 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9253 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9254 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9255 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9258 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9259 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9260 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9263 // commitment tx with seven outputs untrimmed (maximum feerate)
9264 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9265 chan.context.feerate_per_kw = 647;
9267 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9268 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9269 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9272 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9273 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9274 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9277 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9278 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9279 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9282 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9283 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9284 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9287 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9288 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9289 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9292 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9293 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9294 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9297 // commitment tx with six outputs untrimmed (minimum feerate)
9298 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9299 chan.context.feerate_per_kw = 648;
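// Why the seven-output vector above becomes six outputs here: per BOLT 3, a received HTLC is
// trimmed when its amount is below the dust limit plus the HTLC-success fee. A minimal,
// illustrative check of that boundary (constants hard-coded from BOLT 3 for non-anchor
// channels; not part of the upstream vectors):
{
let dust_limit_sat = 546u64; // holder dust limit used by these vectors
let received_htlc_sat = 1_000u64; // the smallest received HTLC in the test set
let htlc_success_fee = |feerate_per_kw: u64| 703 * feerate_per_kw / 1000; // 703 = HTLC-success tx weight
assert!(received_htlc_sat < dust_limit_sat + htlc_success_fee(648)); // trimmed at 648 sat/kw
assert!(received_htlc_sat >= dust_limit_sat + htlc_success_fee(647)); // still present at 647 sat/kw
}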
9301 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9302 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9303 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9306 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9307 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9308 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9311 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9312 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9313 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9316 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9317 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9318 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9321 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9322 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9323 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9326 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9327 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9328 chan.context.feerate_per_kw = 645;
9329 chan.context.holder_dust_limit_satoshis = 1001;
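// The *_with_anchors vectors use the anchor-output commitment format: two extra 330 sat anchor
// outputs appear, each HTLC script gains a trailing `1 OP_CHECKSEQUENCEVERIFY OP_DROP`, and the
// counterparty's HTLC signatures commit to SIGHASH_SINGLE|SIGHASH_ANYONECANPAY (the 0x83 sighash
// byte ending those signatures in the witnesses below).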
9331 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9332 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9333 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9336 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9337 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9338 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9341 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9342 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9343 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9346 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9347 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9348 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9351 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9352 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9353 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9356 // commitment tx with six outputs untrimmed (maximum feerate)
9357 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9358 chan.context.feerate_per_kw = 2069;
9359 chan.context.holder_dust_limit_satoshis = 546;
9361 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9362 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9363 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9366 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9367 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9368 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9371 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9372 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9373 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9376 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9377 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9378 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9381 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9382 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9383 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9386 // commitment tx with five outputs untrimmed (minimum feerate)
9387 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9388 chan.context.feerate_per_kw = 2070;
9390 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9391 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9392 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9395 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9396 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9397 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9400 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9401 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9402 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9405 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9406 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9407 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9410 // commitment tx with five outputs untrimmed (maximum feerate)
9411 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9412 chan.context.feerate_per_kw = 2194;
9414 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9415 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9416 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9419 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9420 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9421 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9424 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9425 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9426 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9429 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9430 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9431 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9434 // commitment tx with four outputs untrimmed (minimum feerate)
9435 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9436 chan.context.feerate_per_kw = 2195;
9438 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9439 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9440 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9443 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9444 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9445 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9448 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9449 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9450 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9453 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9454 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9455 chan.context.feerate_per_kw = 2185;
9456 chan.context.holder_dust_limit_satoshis = 2001;
9457 let cached_channel_type = chan.context.channel_type.clone();
9458 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
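// With anchors_zero_htlc_fee_and_dependencies the HTLC transactions pay no fee, so an HTLC is
// trimmed by comparing its amount directly against the dust limit: the 2001 sat limit above drops
// the 1_000 sat and both 2_000 sat HTLCs, leaving the 3_000 and 4_000 sat HTLCs alongside the two
// anchors, to_local, and to_remote.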
9460 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9461 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9462 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9465 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9466 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9467 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9470 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9471 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9472 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9475 // commitment tx with four outputs untrimmed (maximum feerate)
9476 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9477 chan.context.feerate_per_kw = 3702;
9478 chan.context.holder_dust_limit_satoshis = 546;
9479 chan.context.channel_type = cached_channel_type.clone();
9481 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9482 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9483 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9486 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9487 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9488 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9491 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9492 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9493 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9496 // commitment tx with three outputs untrimmed (minimum feerate)
9497 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9498 chan.context.feerate_per_kw = 3703;
9500 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9501 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9502 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9505 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9506 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9507 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9510 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9511 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9512 chan.context.feerate_per_kw = 3687;
9513 chan.context.holder_dust_limit_satoshis = 3001;
9514 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9516 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9517 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9518 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9521 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9522 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9523 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9526 // commitment tx with three outputs untrimmed (maximum feerate)
9527 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9528 chan.context.feerate_per_kw = 4914;
9529 chan.context.holder_dust_limit_satoshis = 546;
9530 chan.context.channel_type = cached_channel_type.clone();
9532 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9533 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9534 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9537 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9538 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9539 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9542 // commitment tx with two outputs untrimmed (minimum feerate)
9543 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9544 chan.context.feerate_per_kw = 4915;
9545 chan.context.holder_dust_limit_satoshis = 546;
9547 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9548 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9549 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9551 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9552 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9553 chan.context.feerate_per_kw = 4894;
9554 chan.context.holder_dust_limit_satoshis = 4001;
9555 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9557 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9558 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9559 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9561 // commitment tx with two outputs untrimmed (maximum feerate)
9562 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9563 chan.context.feerate_per_kw = 9651180;
9564 chan.context.holder_dust_limit_satoshis = 546;
9565 chan.context.channel_type = cached_channel_type.clone();
9567 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9568 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9569 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9571 // commitment tx with one output untrimmed (minimum feerate)
9572 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9573 chan.context.feerate_per_kw = 9651181;
9575 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9576 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9577 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9579 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9580 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9581 chan.context.feerate_per_kw = 6216010;
9582 chan.context.holder_dust_limit_satoshis = 4001;
9583 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9585 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9586 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9587 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9589 // commitment tx with fee greater than funder amount
9590 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9591 chan.context.feerate_per_kw = 9651936;
9592 chan.context.holder_dust_limit_satoshis = 546;
9593 chan.context.channel_type = cached_channel_type;
9595 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9596 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9597 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9599 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9600 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9601 chan.context.feerate_per_kw = 253;
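// (253 sat/kW is roughly 1 sat/vbyte (253 * 4 = 1012 sat/kvB), i.e. the practical feerate floor.)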
9602 chan.context.pending_inbound_htlcs.clear();
9603 chan.context.pending_inbound_htlcs.push({
9604 let mut out = InboundHTLCOutput{
9606 amount_msat: 2000000,
9608 payment_hash: PaymentHash([0; 32]),
9609 state: InboundHTLCState::Committed,
9611 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9614 chan.context.pending_outbound_htlcs.clear();
9615 chan.context.pending_outbound_htlcs.push({
9616 let mut out = OutboundHTLCOutput{
9618 amount_msat: 5000001,
9620 payment_hash: PaymentHash([0; 32]),
9621 state: OutboundHTLCState::Committed,
9622 source: HTLCSource::dummy(),
9623 skimmed_fee_msat: None,
9624 blinding_point: None,
9626 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9629 chan.context.pending_outbound_htlcs.push({
9630 let mut out = OutboundHTLCOutput{
9632 amount_msat: 5000000,
9634 payment_hash: PaymentHash([0; 32]),
9635 state: OutboundHTLCState::Committed,
9636 source: HTLCSource::dummy(),
9637 skimmed_fee_msat: None,
9638 blinding_point: None,
9640 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9644 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9645 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9646 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9649 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9650 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9651 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9653 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9654 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9655 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9657 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9658 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9659 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9662 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9663 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9664 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9665 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9668 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9669 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9670 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9672 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9673 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9674 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9676 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9677 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9678 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9683 fn test_per_commitment_secret_gen() {
9684 // Test vectors from BOLT 3 Appendix D:
9686 let mut seed = [0; 32];
9687 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9688 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9689 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
9691 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9692 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9693 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
9695 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9696 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9698 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9699 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
9701 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9702 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9703 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
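// As an illustration of what `build_commitment_secret` is computing: BOLT 3 walks the 48-bit
// index from bit 47 down to bit 0, and for each set bit flips that bit in the running value
// and hashes it with SHA256. A minimal sketch (hypothetical helper, not LDK's implementation
// in `chan_utils`), cross-checked against the last vector above:
fn derive_secret_sketch(seed: &[u8; 32], idx: u64) -> [u8; 32] {
	let mut res = *seed;
	for i in 0..48 {
		let bitpos = 47 - i;
		if idx & (1u64 << bitpos) != 0 {
			// Flip bit `bitpos` of the running value, then hash it.
			res[bitpos / 8] ^= 1 << (bitpos & 7);
			res = Sha256::hash(&res).to_byte_array();
		}
	}
	res
}
assert_eq!(derive_secret_sketch(&seed, 1)[..],
	<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);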
9707 fn test_key_derivation() {
9708 // Test vectors from BOLT 3 Appendix E:
9709 let secp_ctx = Secp256k1::new();
9711 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9712 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9714 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9715 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9717 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9718 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
9720 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9721 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
9723 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9724 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
9726 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9727 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
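// For reference, the BOLT 3 derivations exercised above are:
//   localpubkey       = basepoint + SHA256(per_commitment_point || basepoint) * G
//   localprivkey      = basepoint_secret + SHA256(per_commitment_point || basepoint)
//   revocationpubkey  = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                       + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
//   revocationprivkey = revocation_basepoint_secret * SHA256(revocation_basepoint || per_commitment_point)
//                       + per_commitment_secret * SHA256(per_commitment_point || revocation_basepoint)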
9731 fn test_zero_conf_channel_type_support() {
9732 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9733 let secp_ctx = Secp256k1::new();
9734 let seed = [42; 32];
9735 let network = Network::Testnet;
9736 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9737 let logger = test_utils::TestLogger::new();
9739 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9740 let config = UserConfig::default();
9741 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9742 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9744 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9745 channel_type_features.set_zero_conf_required();
9747 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9748 open_channel_msg.channel_type = Some(channel_type_features);
9749 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9750 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9751 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9752 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9753 assert!(res.is_ok());
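// Note that the open is accepted at the channel level even though the peer requested a
// zero-conf channel type; actually granting zero-conf treatment is presumably deferred to the
// caller (e.g. manual inbound-channel acceptance at the ChannelManager level).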
9757 fn test_supports_anchors_zero_htlc_tx_fee() {
9758 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9759 // resulting `channel_type`.
9760 let secp_ctx = Secp256k1::new();
9761 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9762 let network = Network::Testnet;
9763 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9764 let logger = test_utils::TestLogger::new();
9766 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9767 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9769 let mut config = UserConfig::default();
9770 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9772 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`; both
9773 // sides need to signal it.
9774 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9775 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9776 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9777 &config, 0, 42, None
9779 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9781 let mut expected_channel_type = ChannelTypeFeatures::empty();
9782 expected_channel_type.set_static_remote_key_required();
9783 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9785 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9786 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9787 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9791 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9792 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9793 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9794 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9795 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9798 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9799 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9803 fn test_rejects_implicit_simple_anchors() {
9804 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9805 // each side's `InitFeatures`, it is rejected.
9806 let secp_ctx = Secp256k1::new();
9807 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9808 let network = Network::Testnet;
9809 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9810 let logger = test_utils::TestLogger::new();
9812 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9813 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9815 let config = UserConfig::default();
9817 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9818 let static_remote_key_required: u64 = 1 << 12;
9819 let simple_anchors_required: u64 = 1 << 20;
9820 let raw_init_features = static_remote_key_required | simple_anchors_required;
9821 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
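// Bit 12 is `option_static_remotekey` (required) and bit 20 is the legacy `option_anchors`
// a.k.a. `option_anchor_outputs` (required); serializing the OR of the two little-endian
// yields an InitFeatures advertising both as required.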
9823 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9824 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9825 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9829 // Set `channel_type` to `None` to force the implicit feature negotiation.
9830 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9831 open_channel_msg.channel_type = None;
9833 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9834 // `static_remote_key`, it will fail the channel.
9835 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9836 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9837 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9838 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9840 assert!(channel_b.is_err());
9844 fn test_rejects_simple_anchors_channel_type() {
9845 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature, it is rejected.
9847 let secp_ctx = Secp256k1::new();
9848 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9849 let network = Network::Testnet;
9850 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9851 let logger = test_utils::TestLogger::new();
9853 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9854 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9856 let config = UserConfig::default();
9858 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9859 let static_remote_key_required: u64 = 1 << 12;
9860 let simple_anchors_required: u64 = 1 << 20;
9861 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9862 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9863 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9864 assert!(!simple_anchors_init.requires_unknown_bits());
9865 assert!(!simple_anchors_channel_type.requires_unknown_bits());
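// Both bits are known to LDK, so any rejection below is due to channel-type policy rather
// than unknown-feature handling.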
9867 // First, we'll try to open a channel between A and B where A requests a channel type for
9868 // the original `option_anchors` feature (non-zero-fee HTLC tx). This should be rejected by
9869 // B as it's not supported by LDK.
9870 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9871 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9872 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9876 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9877 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9879 let res = InboundV1Channel::<&TestKeysInterface>::new(
9880 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9881 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9882 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9884 assert!(res.is_err());
9886 // Then, we'll try to open another channel where A requests a channel type for
9887 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9888 // original `option_anchors` feature, which should be rejected by A as it's not supported by LDK.
9890 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9891 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9892 10000000, 100000, 42, &config, 0, 42, None
9895 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9897 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9898 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9899 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9900 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9903 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9904 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9906 let res = channel_a.accept_channel(
9907 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9909 assert!(res.is_err());
9913 fn test_waiting_for_batch() {
9914 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9915 let logger = test_utils::TestLogger::new();
9916 let secp_ctx = Secp256k1::new();
9917 let seed = [42; 32];
9918 let network = Network::Testnet;
9919 let best_block = BestBlock::from_network(network);
9920 let chain_hash = ChainHash::using_genesis_block(network);
9921 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9923 let mut config = UserConfig::default();
9924 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9925 // channel in a batch before all channels are ready.
9926 config.channel_handshake_limits.trust_own_funding_0conf = true;
9928 // Create a channel from node a to node b that will be part of batch funding.
9929 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9930 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9935 &channelmanager::provided_init_features(&config),
9945 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9946 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9947 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9952 &channelmanager::provided_channel_type_features(&config),
9953 &channelmanager::provided_init_features(&config),
9959 true, // Allow node b to send a 0conf channel_ready.
9962 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9963 node_a_chan.accept_channel(
9964 &accept_channel_msg,
9965 &config.channel_handshake_limits,
9966 &channelmanager::provided_init_features(&config),
9969 // Fund the channel with a batch funding transaction.
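// The transaction below carries a second output (with a throwaway script) standing in for
// the funding output of another channel in the same batch.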
9970 let output_script = node_a_chan.context.get_funding_redeemscript();
9971 let tx = Transaction {
9973 lock_time: LockTime::ZERO,
9977 value: 10000000, script_pubkey: output_script.clone(),
9980 value: 10000000, script_pubkey: Builder::new().into_script(),
9983 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9984 let funding_created_msg = node_a_chan.get_funding_created(
9985 tx.clone(), funding_outpoint, true, &&logger,
9986 ).map_err(|_| ()).unwrap();
9987 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9988 &funding_created_msg.unwrap(),
9992 ).map_err(|_| ()).unwrap();
9993 let node_b_updates = node_b_chan.monitor_updating_restored(
10001 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
10002 // broadcasting the funding transaction until the batch is ready.
10003 let res = node_a_chan.funding_signed(
10004 &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
10006 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
10007 let node_a_updates = node_a_chan.monitor_updating_restored(
10014 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
10015 // as the funding transaction depends on all channels in the batch becoming ready.
10016 assert!(node_a_updates.channel_ready.is_none());
10017 assert!(node_a_updates.funding_broadcastable.is_none());
10018 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
10020 // It is possible to receive a 0conf channel_ready from the remote node.
10021 node_a_chan.channel_ready(
10022 &node_b_updates.channel_ready.unwrap(),
10030 node_a_chan.context.channel_state,
10031 ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
10034 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
10035 node_a_chan.set_batch_ready();
10036 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
10037 assert!(node_a_chan.check_get_channel_ready(0).is_some());